query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, 30 items) | negative_scores (list, 30 items) | document_score (string, 4–10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
Test the internal _parse_image_meta method. Feed it a 'zvol' image as we get it from 'imgadm list -j'. | def test_parse_image_meta_zvol(image_zvol):
ret = {
"description": (
"Ubuntu 18.04 LTS (20180808 64-bit). Certified Ubuntu Server "
"Cloud Image from Canonical. For kvm and bhyve."
),
"name": "ubuntu-certified-18.04",
"os": "linux",
"published": "2018-10-11T12:45:24.804Z",
"source": "https://images.joyent.com",
"version": "20180808",
}
assert _parse_image_meta(image_zvol, True) == ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_parse_image_meta_lx(image_lx):\n ret = {\n \"description\": (\n \"Container-native Ubuntu 16.04 64-bit image. Built to run on \"\n \"containers with bare metal speed, while offering all the \"\n \"services of a typical unix host.\"\n ),\n \"name\": \"ubuntu-16.04\",\n \"os\": \"linux\",\n \"published\": \"2016-06-01T02:17:41Z\",\n \"source\": \"https://images.joyent.com\",\n \"version\": \"20160601\",\n }\n assert _parse_image_meta(image_lx, True) == ret",
"def test_list_image_metadata(self):\n pass",
"def test_parse_image_meta_native(image_native):\n ret = {\n \"description\": (\"A SmartOS image pre-configured for building pkgsrc packages.\"),\n \"name\": \"pkgbuild\",\n \"os\": \"smartos\",\n \"published\": \"2018-04-09T08:25:52Z\",\n \"source\": \"https://images.joyent.com\",\n \"version\": \"18.1.0\",\n }\n assert _parse_image_meta(image_native, True) == ret",
"def test_parse_image_meta_docker(image_docker):\n ret = {\n \"description\": (\n \"Docker image imported from \"\n \"busybox42/zimbra-docker-centos:latest on \"\n \"2019-03-23T01:32:25.320Z.\"\n ),\n \"name\": \"busybox42/zimbra-docker-centos:latest\",\n \"os\": \"linux\",\n \"published\": \"2019-03-23T01:32:25.320Z\",\n \"source\": \"https://docker.io\",\n \"version\": \"62487cf6a7f6\",\n }\n assert _parse_image_meta(image_docker, True) == ret",
"def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]",
"def _parseImageXml(self, xml, topImage):\n if not topImage or topImage.pixelInfo.get('magnificaiton'):\n return\n topImage.parse_image_description(xml)\n if not topImage._description_record:\n return\n try:\n xml = topImage._description_record\n # Optrascan metadata\n scanDetails = xml.get('ScanInfo', xml.get('EncodeInfo'))['ScanDetails']\n mag = float(scanDetails['Magnification'])\n # In microns; convert to mm\n scale = float(scanDetails['PixelResolution']) * 1e-3\n topImage._pixelInfo = {\n 'magnification': mag,\n 'mm_x': scale,\n 'mm_y': scale,\n }\n except Exception:\n pass",
"def test_parse_image_meta_orphan(image_orphan):\n ret = {\"Error\": \"This looks like an orphaned image, image payload was invalid.\"}\n assert _parse_image_meta(image_orphan, True) == ret",
"def test_read_image(self):\n pass",
"def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None",
"def _make_image_info_hst(self, flistname):\n\n flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n #fname=line.strip()\n flist.append(fname)\n magzp_list.append(magzp)\n\n magzp = np.array(magzp_list)\n\n nimage = len(flist)\n\n path_len = max([len(f) for f in flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f.replace('sci.fits','wht.fits')\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info",
"def test_read_namespaced_image_stream_image(self):\n pass",
"def parse_image_meta_graph(self, meta):\n\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n pass",
"def test_rt_metadata(self):\n\n img = hopper()\n\n # Behaviour change: re #1416\n # Pre ifd rewrite, ImageJMetaData was being written as a string(2),\n # Post ifd rewrite, it's defined as arbitrary bytes(7). It should\n # roundtrip with the actual bytes, rather than stripped text\n # of the premerge tests.\n #\n # For text items, we still have to decode('ascii','replace') because\n # the tiff file format can't take 8 bit bytes in that field.\n\n basetextdata = \"This is some arbitrary metadata for a text field\"\n bindata = basetextdata.encode('ascii') + b\" \\xff\"\n textdata = basetextdata + \" \" + chr(255)\n reloaded_textdata = basetextdata + \" ?\"\n floatdata = 12.345\n doubledata = 67.89\n info = TiffImagePlugin.ImageFileDirectory()\n\n ImageJMetaData = tag_ids['ImageJMetaData']\n ImageJMetaDataByteCounts = tag_ids['ImageJMetaDataByteCounts']\n ImageDescription = tag_ids['ImageDescription']\n\n info[ImageJMetaDataByteCounts] = len(bindata)\n info[ImageJMetaData] = bindata\n info[tag_ids['RollAngle']] = floatdata\n info.tagtype[tag_ids['RollAngle']] = 11\n info[tag_ids['YawAngle']] = doubledata\n info.tagtype[tag_ids['YawAngle']] = 12\n\n info[ImageDescription] = textdata\n\n f = self.tempfile(\"temp.tif\")\n\n img.save(f, tiffinfo=info)\n\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts], (len(bindata),))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (len(bindata),))\n\n self.assertEqual(loaded.tag[ImageJMetaData], bindata)\n self.assertEqual(loaded.tag_v2[ImageJMetaData], bindata)\n\n self.assertEqual(loaded.tag[ImageDescription], (reloaded_textdata,))\n self.assertEqual(loaded.tag_v2[ImageDescription], reloaded_textdata)\n\n loaded_float = loaded.tag[tag_ids['RollAngle']][0]\n self.assertAlmostEqual(loaded_float, floatdata, places=5)\n loaded_double = loaded.tag[tag_ids['YawAngle']][0]\n self.assertAlmostEqual(loaded_double, doubledata)\n\n # check with 2 element ImageJMetaDataByteCounts, issue #2006\n\n info[ImageJMetaDataByteCounts] = (8, len(bindata) - 8)\n img.save(f, tiffinfo=info)\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))",
"def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info",
"def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }",
"def olive_image_parser(text: bytes) -> Optional[dict]:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n\n try:\n assert root is not None\n img = {\n 'id': root.get('id'),\n 'coords': root.img.get('box').split(),\n 'name': root.meta.get('name'),\n 'resolution': root.meta.get('images_resolution'),\n 'filepath': root.img.get('href')\n }\n return img\n except AssertionError:\n return None",
"def parse_image_meta(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8] # (x1, y1, x2, y2) window of image in in pixels\n active_class_ids = meta[:, 8:]\n return image_id, image_shape, window, active_class_ids",
"def test_meta(self):\n expected = {\n 'data_path': [str(self.img_path)],\n 'fast_disk': '',\n 'num_workers': -1,\n 'save_path0': str(self.session_path.joinpath('alf')),\n 'move_bin': True,\n 'keep_movie_raw': False,\n 'delete_bin': False,\n 'batch_size': 500,\n 'combined': False,\n 'look_one_level_down': False,\n 'num_workers_roi': -1,\n 'nimg_init': 400,\n 'nonrigid': True,\n 'maxregshift': 0.05,\n 'denoise': 1,\n 'block_size': [128, 128],\n 'save_mat': True,\n 'scalefactor': 1,\n 'mesoscan': True,\n 'nplanes': 1,\n 'tau': 1.5,\n 'functional_chan': 1,\n 'align_by_chan': 1,\n 'nrois': 1,\n 'nchannels': 1,\n 'fs': 6.8,\n 'lines': [[3, 4, 5]],\n 'dx': np.array([0], dtype=int),\n 'dy': np.array([0], dtype=int),\n }\n\n meta = {\n 'scanImageParams': {'hStackManager': {'zs': 320},\n 'hRoiManager': {'scanVolumeRate': 6.8}},\n 'FOV': [{'topLeftDeg': [-1, 1.3], 'topRightDeg': [3, 1.3], 'bottomLeftDeg': [-1, 5.2],\n 'nXnYnZ': [512, 512, 1], 'channelIdx': 2, 'lineIdx': [4, 5, 6]}]\n }\n with open(self.img_path.joinpath('_ibl_rawImagingData.meta.json'), 'w') as f:\n json.dump(meta, f)\n self.img_path.joinpath('test.tif').touch()\n with mock.patch.object(self.task, 'get_default_tau', return_value=1.5):\n _ = self.task.run(run_suite2p=False, rename_files=False)\n self.assertEqual(self.task.status, 0)\n self.assertDictEqual(self.task.kwargs, expected)\n # {k: v for k, v in self.task.kwargs.items() if expected[k] != v}\n # Now overwrite a specific option with task.run kwarg\n with mock.patch.object(self.task, 'get_default_tau', return_value=1.5):\n _ = self.task.run(run_suite2p=False, rename_files=False, nchannels=2, delete_bin=True)\n self.assertEqual(self.task.status, 0)\n self.assertEqual(self.task.kwargs['nchannels'], 2)\n self.assertEqual(self.task.kwargs['delete_bin'], True)\n with open(self.img_path.joinpath('_ibl_rawImagingData.meta.json'), 'w') as f:\n json.dump({}, f)",
"def process_image(self):\n pass",
"def metadata2eic(url):\n logging.info('fetching image metadata from %s' % url)\n ds = json.loads(urllib.urlopen(url).read())\n fields = ['imagename','alt','pitch','roll']\n for d in ds:\n yield map(str,[d[k] for k in fields])",
"def process(self, image):",
"def test_Image():\n assert Image(cur, \"Simple_Linear\").detect_image() == True\n assert Image(cur, \"Logistic_Linear\").detect_image() == False\n assert Image(cur, \"Simple_Linear\").date == \"2021-04-20\"\n assert Image(cur, \"Breslow-Day_Test\").source == \"Course BIOSTAT703 slide\"",
"def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8]\n active_class_ids = meta[:, 8:]\n return [image_id, image_shape, window, active_class_ids]",
"def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)",
"def forward_test(self, img, img_metas, **kwargs):",
"def test_read_namespaced_image_stream_tag(self):\n pass",
"def test_aws_service_api_image_get(self):\n pass",
"def getImageInfo(img, header=''):\n if (os.path.exists(img) == False):\n print \"image not found: \", img\n return\n # Assume this is a CASA image\n if (header == ''):\n try:\n print \"imhead\",\n header = imhead(img, mode = 'list') # This will work for most CASA builds\n except:\n print \"imhead\",\n header = imhead(img) # needed to prevent crash in early CASA 4.6 builds (see CAS-8214)\n print \"imhead\",\n header = imhead(img, mode = 'list')\n if (header is None):\n print \"imhead returned NoneType. This image header is not sufficiently standard.\"\n return\n if ('beammajor' in header.keys()):\n bmaj = header['beammajor']\n bmin = header['beamminor']\n bpa = header['beampa']\n elif ('perplanebeams' in header.keys()):\n beammajor = []\n beamminor = []\n beampa = []\n for beamchan in range(header['perplanebeams']['nChannels']):\n beamdict = header['perplanebeams']['*'+str(beamchan)]\n beammajor.append(beamdict['major']['value'])\n beamminor.append(beamdict['minor']['value'])\n beampa.append(beamdict['positionangle']['value'])\n bmaj = np.median(beammajor)\n bmin = np.median(beamminor)\n sinbpa = np.sin(np.radians(np.array(beampa)))\n cosbpa = np.cos(np.radians(np.array(beampa)))\n bpa = np.degrees(np.median(np.arctan2(np.median(sinbpa), np.median(cosbpa))))\n else:\n bmaj = 0\n bmin = 0\n bpa = 0\n naxis1 = header['shape'][0]\n naxis2 = header['shape'][1]\n cdelt1 = header['cdelt1']\n cdelt2 = header['cdelt2']\n if (header['cunit1'].find('rad') >= 0):\n # convert from rad to arcsec\n cdelt1 *= 3600*180/np.pi\n elif (header['cunit1'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt1 *= 3600\n if (header['cunit2'].find('rad') >= 0):\n cdelt2 *= 3600*180/np.pi\n # convert from rad to arcsec\n elif (header['cunit2'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt2 *= 3600\n if (type(bmaj) == dict):\n # casa >= 4.1.0 (previously these were floats)\n bmaj = headerToArcsec(bmaj)\n bmin = headerToArcsec(bmin)\n bpa = headerToArcsec(bpa)/3600.\n ghz = 0\n if ('ctype4' in header.keys()):\n if (header['ctype4'] == 'Frequency'):\n imgfreq = header['crval4']\n cdelt = header['cdelt4']\n crpix = header['crpix4']\n npix = header['shape'][3]\n ghz = imgfreq*1e-9\n if (ghz == 0):\n if ('ctype3' in header.keys()):\n if (header['ctype3'] == 'Frequency'):\n imgfreq = header['crval3']\n cdelt = header['cdelt3']\n crpix = header['crpix3']\n npix = header['shape'][2]\n ghz = imgfreq*1e-9\n return([bmaj,bmin,bpa,cdelt1,cdelt2,naxis1,naxis2,ghz], header)",
"def test_read(self):\n for line in TESTIMAGES.split(\"\\n\"):\n vals = line.split()\n name = vals[0]\n dim1, dim2 = [int(x) for x in vals[1:3]]\n mini, maxi, mean, stddev = [float(x) for x in vals[3:]]\n obj = marccdimage()\n obj.read(self.fn[name])\n self.assertAlmostEqual(mini, obj.getmin(), 2, \"getmin\")\n self.assertAlmostEqual(maxi, obj.getmax(), 2, \"getmax\")\n self.assertAlmostEqual(mean, obj.getmean(), 2, \"getmean\")\n self.assertAlmostEqual(stddev, obj.getstddev(), 2, \"getstddev\")\n self.assertEqual(dim1, obj.dim1, \"dim1\")\n self.assertEqual(dim2, obj.dim2, \"dim2\")",
"def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)"
]
| [
"0.70250654",
"0.68019986",
"0.6737672",
"0.6549403",
"0.64224327",
"0.6239823",
"0.622689",
"0.62021476",
"0.61582226",
"0.60746354",
"0.60221714",
"0.6009213",
"0.59953326",
"0.59682703",
"0.595405",
"0.59428936",
"0.5934149",
"0.5909532",
"0.58560014",
"0.5851444",
"0.58398783",
"0.5831676",
"0.5829496",
"0.5825713",
"0.5817659",
"0.577216",
"0.57713044",
"0.57674515",
"0.57645255",
"0.57642394"
]
| 0.7878405 | 0 |
Test the internal _parse_image_meta method. Feed it a 'docker' image as we get it from 'imgadm list -j'. | def test_parse_image_meta_docker(image_docker):
ret = {
"description": (
"Docker image imported from "
"busybox42/zimbra-docker-centos:latest on "
"2019-03-23T01:32:25.320Z."
),
"name": "busybox42/zimbra-docker-centos:latest",
"os": "linux",
"published": "2019-03-23T01:32:25.320Z",
"source": "https://docker.io",
"version": "62487cf6a7f6",
}
assert _parse_image_meta(image_docker, True) == ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_parse_image_meta_lx(image_lx):\n ret = {\n \"description\": (\n \"Container-native Ubuntu 16.04 64-bit image. Built to run on \"\n \"containers with bare metal speed, while offering all the \"\n \"services of a typical unix host.\"\n ),\n \"name\": \"ubuntu-16.04\",\n \"os\": \"linux\",\n \"published\": \"2016-06-01T02:17:41Z\",\n \"source\": \"https://images.joyent.com\",\n \"version\": \"20160601\",\n }\n assert _parse_image_meta(image_lx, True) == ret",
"def test_parse_image_meta_native(image_native):\n ret = {\n \"description\": (\"A SmartOS image pre-configured for building pkgsrc packages.\"),\n \"name\": \"pkgbuild\",\n \"os\": \"smartos\",\n \"published\": \"2018-04-09T08:25:52Z\",\n \"source\": \"https://images.joyent.com\",\n \"version\": \"18.1.0\",\n }\n assert _parse_image_meta(image_native, True) == ret",
"def test_list_image_metadata(self):\n pass",
"def parse_docker_image(image):\n # add defaults\n if '/' not in image:\n image = 'library/' + image\n if ':' not in image:\n image = image + ':latest'\n\n # parse\n tokens1 = image.split('/')\n namespace = tokens1[0]\n\n tokens2 = tokens1[1].split(':')\n name = tokens2[0]\n tag = tokens2[1]\n\n return namespace, name, tag",
"def test_parse_image_meta_orphan(image_orphan):\n ret = {\"Error\": \"This looks like an orphaned image, image payload was invalid.\"}\n assert _parse_image_meta(image_orphan, True) == ret",
"def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]",
"def parse_image_meta_graph(self, meta):\n\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n pass",
"def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }",
"def test_read_image(self):\n pass",
"def test_read_namespaced_image_stream_image(self):\n pass",
"def parse_image_meta(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8] # (x1, y1, x2, y2) window of image in in pixels\n active_class_ids = meta[:, 8:]\n return image_id, image_shape, window, active_class_ids",
"def test_parse_image_meta_zvol(image_zvol):\n ret = {\n \"description\": (\n \"Ubuntu 18.04 LTS (20180808 64-bit). Certified Ubuntu Server \"\n \"Cloud Image from Canonical. For kvm and bhyve.\"\n ),\n \"name\": \"ubuntu-certified-18.04\",\n \"os\": \"linux\",\n \"published\": \"2018-10-11T12:45:24.804Z\",\n \"source\": \"https://images.joyent.com\",\n \"version\": \"20180808\",\n }\n assert _parse_image_meta(image_zvol, True) == ret",
"def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8]\n active_class_ids = meta[:, 8:]\n return [image_id, image_shape, window, active_class_ids]",
"def test_docker_image_hint(mock_tools):\n # Mock the return values for Docker verification\n mock_tools.subprocess.check_output.side_effect = [\n VALID_DOCKER_VERSION,\n VALID_DOCKER_INFO,\n VALID_BUILDX_VERSION,\n VALID_USER_MAPPING_IMAGE_CACHE,\n ]\n\n Docker.verify(mock_tools, image_tag=\"myimage:tagtorulethemall\")\n\n mock_tools.subprocess.run.assert_has_calls(\n [\n call(\n [\n \"docker\",\n \"run\",\n \"--rm\",\n \"--volume\",\n f\"{Path.cwd() / 'build'}:/host_write_test:z\",\n \"myimage:tagtorulethemall\",\n \"touch\",\n PurePosixPath(\"/host_write_test/container_write_test\"),\n ],\n check=True,\n ),\n call(\n [\n \"docker\",\n \"run\",\n \"--rm\",\n \"--volume\",\n f\"{Path.cwd() / 'build'}:/host_write_test:z\",\n \"myimage:tagtorulethemall\",\n \"rm\",\n \"-f\",\n PurePosixPath(\"/host_write_test/container_write_test\"),\n ],\n check=True,\n ),\n ]\n )",
"def olive_image_parser(text: bytes) -> Optional[dict]:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n\n try:\n assert root is not None\n img = {\n 'id': root.get('id'),\n 'coords': root.img.get('box').split(),\n 'name': root.meta.get('name'),\n 'resolution': root.meta.get('images_resolution'),\n 'filepath': root.img.get('href')\n }\n return img\n except AssertionError:\n return None",
"def test_read_namespaced_image_stream_tag(self):\n pass",
"def test_meta(self):\n expected = {\n 'data_path': [str(self.img_path)],\n 'fast_disk': '',\n 'num_workers': -1,\n 'save_path0': str(self.session_path.joinpath('alf')),\n 'move_bin': True,\n 'keep_movie_raw': False,\n 'delete_bin': False,\n 'batch_size': 500,\n 'combined': False,\n 'look_one_level_down': False,\n 'num_workers_roi': -1,\n 'nimg_init': 400,\n 'nonrigid': True,\n 'maxregshift': 0.05,\n 'denoise': 1,\n 'block_size': [128, 128],\n 'save_mat': True,\n 'scalefactor': 1,\n 'mesoscan': True,\n 'nplanes': 1,\n 'tau': 1.5,\n 'functional_chan': 1,\n 'align_by_chan': 1,\n 'nrois': 1,\n 'nchannels': 1,\n 'fs': 6.8,\n 'lines': [[3, 4, 5]],\n 'dx': np.array([0], dtype=int),\n 'dy': np.array([0], dtype=int),\n }\n\n meta = {\n 'scanImageParams': {'hStackManager': {'zs': 320},\n 'hRoiManager': {'scanVolumeRate': 6.8}},\n 'FOV': [{'topLeftDeg': [-1, 1.3], 'topRightDeg': [3, 1.3], 'bottomLeftDeg': [-1, 5.2],\n 'nXnYnZ': [512, 512, 1], 'channelIdx': 2, 'lineIdx': [4, 5, 6]}]\n }\n with open(self.img_path.joinpath('_ibl_rawImagingData.meta.json'), 'w') as f:\n json.dump(meta, f)\n self.img_path.joinpath('test.tif').touch()\n with mock.patch.object(self.task, 'get_default_tau', return_value=1.5):\n _ = self.task.run(run_suite2p=False, rename_files=False)\n self.assertEqual(self.task.status, 0)\n self.assertDictEqual(self.task.kwargs, expected)\n # {k: v for k, v in self.task.kwargs.items() if expected[k] != v}\n # Now overwrite a specific option with task.run kwarg\n with mock.patch.object(self.task, 'get_default_tau', return_value=1.5):\n _ = self.task.run(run_suite2p=False, rename_files=False, nchannels=2, delete_bin=True)\n self.assertEqual(self.task.status, 0)\n self.assertEqual(self.task.kwargs['nchannels'], 2)\n self.assertEqual(self.task.kwargs['delete_bin'], True)\n with open(self.img_path.joinpath('_ibl_rawImagingData.meta.json'), 'w') as f:\n json.dump({}, f)",
"def test_aws_service_api_image_get(self):\n pass",
"def test_retag_valid_image(self):\n alpine = self.docker.images.get(constant.ALPINE)\n self.assertTrue(alpine.tag(\"demo\", \"rename\"))\n\n alpine = self.docker.images.get(constant.ALPINE)\n self.assertNotIn(\"demo:test\", alpine.tags)",
"def test_read_namespaced_image_stream(self):\n pass",
"def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)",
"def test_one_image(self, img):\n return self.__image_pipeline(img)",
"def _get_docker_images_for_lint(\n self,\n script_obj: Dict,\n script_id: str,\n docker_image_flag: str,\n docker_image_target: Optional[str],\n ) -> List[str]:\n log_prompt = f\"{self._pack_name} - Get All Docker Images For Lint\"\n logger.info(\n f\"{log_prompt} - Requested docker image flag is: '{docker_image_flag}'\"\n )\n imgs = []\n\n if (\n docker_image_flag == DockerImageFlagOption.FROM_YML.value\n ): # the default option\n # Desirable docker images are the docker images from the yml file (alt-dockerimages included)\n logger.info(f\"{self._pack_name} - Get Docker Image from YML - Started\")\n if imgs := get_docker_images_from_yml(script_obj):\n logger.info(\n f\"{log_prompt} - Docker images to run on are: {', '.join(imgs)}\"\n )\n return imgs\n\n di_from_yml = script_obj.get(\"dockerimage\")\n # If the 'dockerimage' key does not exist in yml - run on native image checks will be skipped\n native_image_config = (\n NativeImageConfig()\n ) # parsed docker_native_image_config.json file (a singleton obj)\n supported_native_images_obj = ScriptIntegrationSupportedNativeImages(\n _id=script_id,\n native_image_config=native_image_config,\n docker_image=di_from_yml,\n )\n supported_native_images = set(\n supported_native_images_obj.get_supported_native_image_versions(\n only_production_tags=False\n )\n )\n\n if docker_image_flag.startswith(DockerImageFlagOption.NATIVE.value):\n # Desirable docker image to run on is a native image\n\n self._check_native_image_flag(docker_image_flag)\n\n image_support = docker_image_flag\n if docker_image_flag == DockerImageFlagOption.NATIVE_TARGET.value:\n image_support = DockerImageFlagOption.NATIVE_DEV.value\n\n if native_image := self._get_native_image_name_from_config_file(\n image_support\n ):\n\n if self._is_native_image_support_script(\n native_image, supported_native_images, script_id\n ): # Integration/Script is supported by the requested native image\n native_image_ref: Optional[str] = \"\"\n\n if (\n docker_image_flag == DockerImageFlagOption.NATIVE_TARGET.value\n and docker_image_target\n ):\n # Desirable docker image to run is the target image only on native supported content.\n native_image_ref = docker_image_target\n\n elif docker_image_flag == DockerImageFlagOption.NATIVE_DEV.value:\n # Desirable docker image to run on is the dev native image - get the latest tag from Docker Hub\n native_image_ref = self._get_dev_native_image(script_id)\n\n else:\n # Desirable docker image to run on is a versioned native image - get the docker ref from the\n # docker_native_image_config.json\n native_image_ref = self._get_versioned_native_image(\n native_image\n )\n\n if native_image_ref:\n imgs.append(native_image_ref)\n logger.info(\n f\"{log_prompt} - Native image to run on is: {native_image_ref}\"\n )\n\n elif docker_image_flag == DockerImageFlagOption.ALL_IMAGES.value:\n # Desirable docker images are the docker images from the yml file, the supported versioned native images\n # and the dev native image\n if imgs := self._get_all_docker_images(\n script_obj, script_id, supported_native_images\n ):\n logger.info(\n f\"{log_prompt} - Docker images to run on are: {', '.join(imgs)}\"\n )\n\n else:\n # The flag is a specific docker image (from Docker Hub) or an invalid input -\n # In both cases we will try to run on the given input, if it does not exist in docker hub the run of lint\n # will fail later on.\n imgs.append(docker_image_flag)\n logger.info(\n f\"{log_prompt} - Docker image to run on is: {docker_image_flag}\"\n )\n\n return imgs",
"def _set_image(self):\n\n if not self.spec.get(\"image\"):\n try:\n self.spec[\"image\"] = self._get_image_from_artifact()\n except (AttributeError, KeyError) as err:\n raise LookupError(\n f\"Could not get {self.name} container image: {err}\"\n ) from err\n\n try:\n version = self.spec[\"image\"].split(\"/\")[-1].split(\":\")[1]\n except IndexError:\n version = \"latest\"\n\n self.labels.setdefault(\"app.kubernetes.io/version\", version)",
"def loadMetadata(job, docker_client, pullList, loadList, notExistSet):\n # flag to indicate an error occurred\n errorState = False\n images = []\n for name in pullList:\n if name not in notExistSet:\n job = Job().updateJob(\n job,\n log='Image %s was pulled successfully \\n' % name,\n\n )\n\n try:\n cli_dict = getCliData(name, docker_client, job)\n images.append((name, cli_dict))\n job = Job().updateJob(\n job,\n log='Got pulled image %s metadata \\n' % name\n\n )\n except DockerImageError as err:\n job = Job().updateJob(\n job,\n log='FAILURE: Error with recently pulled image %s\\n%s\\n' % (name, err),\n )\n errorState = True\n\n for name in loadList:\n # create dictionary and load to database\n try:\n cli_dict = getCliData(name, docker_client, job)\n images.append((name, cli_dict))\n job = Job().updateJob(\n job,\n log='Loaded metadata from pre-existing local image %s\\n' % name\n )\n except DockerImageError as err:\n job = Job().updateJob(\n job,\n log='FAILURE: Error with recently loading pre-existing image %s\\n%s\\n' % (\n name, err),\n )\n errorState = True\n return images, errorState",
"def __init__(self, image=None):\n self.openapi_types = {\"image\": ImageInfoSummary}\n\n self.attribute_map = {\"image\": \"image\"}\n\n self._image = image",
"def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None",
"def test_rt_metadata(self):\n\n img = hopper()\n\n # Behaviour change: re #1416\n # Pre ifd rewrite, ImageJMetaData was being written as a string(2),\n # Post ifd rewrite, it's defined as arbitrary bytes(7). It should\n # roundtrip with the actual bytes, rather than stripped text\n # of the premerge tests.\n #\n # For text items, we still have to decode('ascii','replace') because\n # the tiff file format can't take 8 bit bytes in that field.\n\n basetextdata = \"This is some arbitrary metadata for a text field\"\n bindata = basetextdata.encode('ascii') + b\" \\xff\"\n textdata = basetextdata + \" \" + chr(255)\n reloaded_textdata = basetextdata + \" ?\"\n floatdata = 12.345\n doubledata = 67.89\n info = TiffImagePlugin.ImageFileDirectory()\n\n ImageJMetaData = tag_ids['ImageJMetaData']\n ImageJMetaDataByteCounts = tag_ids['ImageJMetaDataByteCounts']\n ImageDescription = tag_ids['ImageDescription']\n\n info[ImageJMetaDataByteCounts] = len(bindata)\n info[ImageJMetaData] = bindata\n info[tag_ids['RollAngle']] = floatdata\n info.tagtype[tag_ids['RollAngle']] = 11\n info[tag_ids['YawAngle']] = doubledata\n info.tagtype[tag_ids['YawAngle']] = 12\n\n info[ImageDescription] = textdata\n\n f = self.tempfile(\"temp.tif\")\n\n img.save(f, tiffinfo=info)\n\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts], (len(bindata),))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (len(bindata),))\n\n self.assertEqual(loaded.tag[ImageJMetaData], bindata)\n self.assertEqual(loaded.tag_v2[ImageJMetaData], bindata)\n\n self.assertEqual(loaded.tag[ImageDescription], (reloaded_textdata,))\n self.assertEqual(loaded.tag_v2[ImageDescription], reloaded_textdata)\n\n loaded_float = loaded.tag[tag_ids['RollAngle']][0]\n self.assertAlmostEqual(loaded_float, floatdata, places=5)\n loaded_double = loaded.tag[tag_ids['YawAngle']][0]\n self.assertAlmostEqual(loaded_double, doubledata)\n\n # check with 2 element ImageJMetaDataByteCounts, issue #2006\n\n info[ImageJMetaDataByteCounts] = (8, len(bindata) - 8)\n img.save(f, tiffinfo=info)\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))",
"def handle_api_list_images(self, http_context):\n\n command = self.docker + ['images', '--format', '\\'{{json .}}\\'', '--no-trunc', '-a']\n images = []\n for line in subprocess.check_output(command).decode().splitlines():\n image = json.loads(line)\n image['hash'] = image['ID'].split(':')[1][:12]\n images.append(image)\n return images",
"def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)"
]
| [
"0.71701115",
"0.70583767",
"0.6989357",
"0.6883845",
"0.632691",
"0.63088745",
"0.62557995",
"0.622059",
"0.6174179",
"0.61586636",
"0.6079249",
"0.60274893",
"0.5975001",
"0.59731615",
"0.59721625",
"0.59384054",
"0.59312916",
"0.59061134",
"0.59037596",
"0.58676225",
"0.5858273",
"0.58440065",
"0.58419406",
"0.5817597",
"0.5760841",
"0.5752351",
"0.57518697",
"0.57328326",
"0.5720405",
"0.5716317"
]
| 0.8324207 | 0 |
Return a valid filename for the image. If there are multiple files in the database, they are appended with ',{number}', for example 'image.png,0'. This method puts the `number` between the filename and the extension, so 'image.png,0' becomes 'image0.png'. Returns str: the converted filename. | def get_filename(self) -> str:
fname = self.url.split("/")[-1]
if "," in fname:
_fname, _i = fname.split(",")
_split_fname = _fname.split(".")
_name = _split_fname[0]
_extension = _split_fname[-1]
return _name + _i + "." + _extension
else:
return fname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )",
"def imId2name(self, im_id):\n \n if isinstance(im_id, int):\n name = str(im_id).zfill(self.STR_ID_LEN) + '.jpg'\n elif isinstance(im_id, str):\n name = im_id + '.jpg'\n else:\n raise AssertionError('Image ID should be of type string or int')\n return name",
"def get_filename(self):\n name, ext = self.fkit.filename.rsplit('.', 1)\n if self._field.extension():\n ext = self._field.extension()\n return '.'.join((name, ext))",
"def file_suffix(self):\n return f'{self.image_count:05}' if self.sequential_naming else \\\n datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")",
"def filename(self):\n return '%s%s' % (self.identifier, self.extension)",
"def format_filename(self, s):\n valid_chars = \"-_ %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_') # I don't like spaces in filenames.\n return filename",
"def get_image_filename(self, filename):\n path = 'images/{folder}/{filename}'.format(\n folder=self.folder,\n filename=filename\n )\n return path",
"def format_image_filename(device_image):\n return \"{}-{}-{}-{}.bit\".format(device_image.bitstream_type,\n device_image.pci_vendor,\n device_image.pci_device,\n device_image.uuid)",
"def _file_name(size):\n timestamp = str(int(time.time()))\n return '%s_%dx%d.%s' % (timestamp, size, size, 'jpg')",
"def image_file_name(instance, filename):\n\text = filename[-4:]\n\tnew_filename = os.path.join('images',str(instance.image_folder),str(instance.user).replace(\" \",\"\").lower()+ext)\n\treturn new_filename",
"def image_name(name):\n \n # Gets the '.' position\n dot = name.find('.')\n # Slice the name from beginning and before '.'\n img = name[:dot]\n # return string with jpg format\n return \"{}.jpg\".format(img)",
"def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'",
"def _image_filename(image_name):\n return '{}.tar'.format(image_name.replace(':', '_').replace('/', '_'))",
"def generate_image_name(self, image):\n return image.replace('shub://', '').replace('/', '-') + '.simg'",
"def make_img_name(file_ext='.png'):\r\n fn = []\r\n # format seqs and write out to temp file\r\n for i in range(0, 30):\r\n fn.append(choice(ALPHABET))\r\n return ''.join(fn) + file_ext",
"def format_filename(s):\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_') # I don't like spaces in filenames.\n return filename",
"def format_filename(s):\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_') # I don't like spaces in filenames.\n return filename",
"def format_filename(s):\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_') # I don't like spaces in filenames.\n return filename",
"def make_frame_name(frame_num):\n return str(frame_num).zfill(3) + '.jpg'",
"def gen_file_name(filename, path=UPLOAD_FOLDER):\n\n i = 1\n while os.path.exists(os.path.join(path, filename)):\n name, extension = os.path.splitext(filename)\n filename = '%s_%s%s' % (name, str(i), extension)\n i += 1\n\n return filename",
"def format_filename(s):\n valid_chars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_')\n return filename",
"def get_file_name(image_dir, image_name_prefix, current_count):\n if imageNumOn:\n # you could also use os.path.join to construct image path file_path\n file_path = image_dir+ \"/\"+image_name_prefix+str(current_count)+\".jpg\"\n else:\n right_now = datetime.datetime.now()\n file_path = (\"%s/%s%04d%02d%02d-%02d%02d%02d.jpg\"\n % (image_dir, image_name_prefix,\n right_now.year, right_now.month, right_now.day,\n right_now.hour, right_now.minute, right_now.second))\n return file_path",
"def path_to_name(img):\n\n return os.path.dirname(img) + '_' + os.path.basename(img)",
"def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"",
"def _gen_image_filename(instance, filename):\n # First, store the original filename in the model\n instance.original_filename = filename\n\n return _unique_path(instance.owner.pk, filename)",
"def create_filename(value):\n return '%s.mp3' % slugify(value, u'_')",
"def _safe_file_name(self):\n FMT_STR = \"%s - %s - %s (%d) - %s%s\"\n return cleanse_filename(FMT_STR % (self.track,\n self.artist.replace(\"/\", \"\\\\\"),\n self.album.replace(\"/\", \"\\\\\"),\n self.year,\n self.title.replace(\"/\", \"\\\\\"),\n os.path.splitext(self.file_name)[1]))",
"def get_filename(key):\n filename = str(key)\n filename = filename.replace('/', '_')\n filename = filename.replace('InceptionResnetV2_', '')\n\n # remove \"Repeat\" scope from filename\n filename = re_repeat.sub('B', filename)\n\n if re_block8.match(filename):\n # the last block8 has different name with the previous 9 occurrences\n filename = filename.replace('Block8', 'Block8_10')\n elif filename.startswith('Logits'):\n # remove duplicate \"Logits\" scope\n filename = filename.replace('Logits_', '', 1)\n\n # from TF to Keras naming\n filename = filename.replace('_weights', '_kernel')\n filename = filename.replace('_biases', '_bias')\n\n return filename + '.npy'",
"def generate_file_filename(instance, filename):\n return _generate_filename(instance, filename, 'photos')",
"def _get_image_name(image_meta, max_len=pvm_const.MaxLen.FILENAME_DEFAULT):\n return pvm_util.sanitize_file_name_for_api(\n image_meta.name, prefix=DiskType.IMAGE + '_',\n suffix='_' + image_meta.checksum, max_len=max_len)"
]
| [
"0.712252",
"0.6974862",
"0.67796457",
"0.6768139",
"0.6755476",
"0.6694814",
"0.66734874",
"0.6669626",
"0.6649419",
"0.664161",
"0.65641737",
"0.65102273",
"0.6467254",
"0.6443507",
"0.64337325",
"0.6411749",
"0.6411749",
"0.6411749",
"0.64037764",
"0.6389226",
"0.63800997",
"0.6379864",
"0.6378622",
"0.63690555",
"0.6341904",
"0.6331023",
"0.63094366",
"0.63050514",
"0.6296666",
"0.6286719"
]
| 0.70944995 | 1 |
Load the image from the URL. Returns a PIL Image object. | def from_url(self) -> PngImagePlugin.PngImageFile:
response = requests.get(self.url)
img = Image.open(BytesIO(response.content))
return img | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_pil_image(self, url):\r\n return Image.open(urlopen(url))",
"def getImage(url):\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n return img",
"def fetch_image(url: str) -> Image.Image:\n r = httpx.get(url)\n if not r.status_code == httpx.codes.OK:\n raise HTTPException(r.status_code, detail=r.reason_phrase)\n f = BytesIO(r.content)\n im = handle_image_file(f)\n return im",
"def read_image(url):\n f = urllib2.urlopen(url)\n img = StringIO(f.read())\n return Image.open(img)",
"def urlToImage(url):\n\n response = requests.get(url)\n image = Image.open(BytesIO(response.content))\n return image",
"def load(url):\n response = requests.get(url)\n pil_image = Image.open(BytesIO(response.content)).convert(\"RGB\")\n # convert to BGR format\n image = np.array(pil_image)[:, :, [2, 1, 0]]\n return image",
"def get_image(self, url):\n\n log(\"Getting image {}\".format(url))\n response = requests.get(url)\n if response.status_code == 200:\n image = self._pilimg.open(io.BytesIO(response.content))\n return image.convert('RGBA')\n return None",
"def _url_to_image(url: str) -> Image.Image:\n assert url.lower().startswith(\"http\"), \"invalid url, must start with http\"\n content = requests.get(url).content\n image = Image.open(BytesIO(content))\n return image",
"def load_remote_image(image_url):\n response = requests.get(image_url, stream=True)\n img = Image.open(BytesIO(response.content))\n image = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\n return image",
"def downloadImage(self, url):\n req = urllib2.Request(url)\n response = urllib2.urlopen(req)\n data = response.read()\n io = cStringIO.StringIO(data)\n return PIL.Image.open(io)",
"def _download_img_from_url(self, img_url):\r\n response = requests.get(img_url)\r\n img = Image.open(BytesIO(response.content))\r\n print(\"Downloaded image from url\")\r\n return img",
"def fetch_image(img_url):\n\n r = requests.get(img_url)\n return r.content",
"def set_image_from_url(self, url: str):\n response = httpx.get(url)\n if response.status_code == 200:\n file = ContentFile(response.content)\n file.name = \"url-\" + shortuuid.uuid()\n self.image = file\n self.save()",
"def GET(self, url):\n try:\n f = open(url, 'r')\n image = f.read()\n f.close()\n except:\n\n db_module.resave_img(url[5:])\n\n f = open(url, 'r')\n image = f.read()\n f.close()\n\n return image",
"def download(self):\n data = urllib.urlopen(self.remoteurl).read()\n s = StringIO.StringIO(data)\n return Image.open(s)",
"def get_image_by_url(url):\n retry_count = 0\n while True:\n try:\n req_headers = {\"User-Agent\": DEFAULT_REQUEST_UA}\n r = requests.get(\n url, headers=req_headers, stream=True, timeout=DEFAULT_REQUEST_TIMEOUT\n )\n image_data = r.content\n if isinstance(image_data, bytes):\n image_data = BytesIO(image_data)\n else:\n image_data = StringIO(image_data)\n\n im = Image.open(image_data)\n return im\n except Timeout as e:\n if retry_count <= DEFAULT_REQUEST_RETRY:\n continue\n else:\n raise e\n except Exception as e:\n logging.exception(e)\n raise RequestException(e)",
"def getOrDownloadImageObject(self, url):\n \n if \"//\" in url:\n return self.downloadImage(url)\n else:\n return self.getPILFromPath(url)",
"def url2img(url : str, timeout = 1) -> Image:\n\n response = requests.get(url, timeout = timeout)\n return Image.open(BytesIO(response.content))",
"def download_image(url):\n buffer = BytesIO()\n download_from_url(url, buffer, pbar=False)\n buffer.seek(0)\n return Image.open(buffer)",
"def _import_image_by_url(self, url, session, field, line_number):\n maxsize = int(config.get(\"import_image_maxbytes\", DEFAULT_IMAGE_MAXBYTES))\n try:\n response = session.get(url, timeout=int(config.get(\"import_image_timeout\", DEFAULT_IMAGE_TIMEOUT)))\n response.raise_for_status()\n\n if response.headers.get('Content-Length') and int(response.headers['Content-Length']) > maxsize:\n raise ValueError(_(\"File size exceeds configured maximum (%s bytes)\") % maxsize)\n\n content = bytearray()\n for chunk in response.iter_content(DEFAULT_IMAGE_CHUNK_SIZE):\n content += chunk\n if len(content) > maxsize:\n raise ValueError(_(\"File size exceeds configured maximum (%s bytes)\") % maxsize)\n\n image = Image.open(io.BytesIO(content))\n w, h = image.size\n if w * h > 42e6: # Nokia Lumia 1020 photo resolution\n raise ValueError(\n u\"Image size excessive, imported images must be smaller \"\n u\"than 42 million pixel\")\n\n return base64.b64encode(content)\n except Exception as e:\n raise ValueError(_(\"Could not retrieve URL: %(url)s [%(field_name)s: L%(line_number)d]: %(error)s\") % {\n 'url': url,\n 'field_name': field,\n 'line_number': line_number + 1,\n 'error': e\n })",
"def get_img_from_url(index, url):\n try:\n with urllib.request.urlopen(url) as response:\n if response.headers.get_content_maintype() == 'image':\n image_filename = image_filename_prefix.format(name=image_class_name,\n counter=index,\n ext=response.headers.get_content_subtype())\n image_filepath = os.path.join(target_folder, image_filename)\n with open(image_filepath, 'wb') as image_file:\n image_file.write(response.read())\n\n print('Fetched URL {}'.format(index))\n\n except urllib.request.HTTPError:\n pass\n except Exception:\n pass",
"def get_image(self, image_id):\n url = self.get_url(image_id)\n return image_util.load_image_from_url(url) if url else None",
"def get_image(\n url: str\n) -> Union[Dict[str, Union[int, str, BytesIO, None]], None]:\n try:\n logger.info('downloading image: %s', url)\n r = requests.get(url)\n\n if r.status_code == 200:\n\n # loading binary data to mem\n img = BytesIO(r.content)\n\n # loading image to PIL\n pil_img = Image.open(img)\n\n # seek to 0\n img.seek(0)\n\n return {\n 'content-type': r.headers.get('Content-Type'),\n 'image': img,\n 'width': pil_img.width,\n 'height': pil_img.height,\n }\n\n raise Exception('wrong status code %s', r.status_code)\n\n except BaseException as e:\n logger.error('could not download and analyze img: %s', str(e))\n\n return None",
"def joblib_read_img_url(url):\n\n from matplotlib.image import imread\n fd = urlopen(url, timeout=10)\n return imread(io.BytesIO(fd.read()))",
"def get_image_from_camera(self, url):\n if DEBUG:\n print(\"[DEBUG] Getting image from BlueIris url: %s\" % url)\n\n resp = urllib.request.urlopen(url)\n image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)\n self.timestamp = time.time()\n self.trigger_image = image\n self.processed_image = image # Start off by having processed image same as initial image\n\n self._init_new_image()\n # if DEBUG:\n # # print(\"[DEBUG] [ImageFrame.get_image_from_camera] Image width: {}, height: {}\".format(\n # self.width, self.height))\n\n # return the image\n return self.trigger_image",
"def load_image(path_to_image, image_name):\n print(\"Loading: \", path_to_image + image_name, \" ...\")\n return Image.open(path_to_image + image_name)",
"def load_image(file_path):\r\n return Image.open(file_path)",
"def load(path) -> Image:\n return Image.open(path)",
"def get_image(image_path):\r\n\r\n return Image.open(image_path)",
"def get_local_image(self, src):\r\n local_image = ImageUtils.store_image(None,\r\n self.link_hash, src, self.config)\r\n return local_image"
]
| [
"0.77640134",
"0.7749962",
"0.7725904",
"0.77168554",
"0.74839956",
"0.73880714",
"0.7365719",
"0.7333797",
"0.7217801",
"0.7201707",
"0.7050136",
"0.69685555",
"0.6968352",
"0.695745",
"0.69478923",
"0.69186074",
"0.69104034",
"0.68546426",
"0.6788575",
"0.67859113",
"0.6744289",
"0.6718778",
"0.66173077",
"0.6572987",
"0.6524668",
"0.6519094",
"0.6497541",
"0.64805853",
"0.6446241",
"0.6422348"
]
| 0.78676915 | 0 |
Reduce the amount of whitespace around an image. | def reduce_whitespace(self, border: int = 5) -> None:
if self.img is None:
raise FileExistsError("Load an image first with from_url.")
pix = np.asarray(self.img)
pix = pix[:, :, 0:3] # Drop the alpha channel
idx = np.where(pix - 255)[0:2] # Drop the color when finding edges
bbox = list(map(min, idx))[::-1] + list(map(max, idx))[::-1]
larger_box = add_whitespace(bbox, border)
self.img = self.img.crop(larger_box) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trim(self):\n result = library.MagickTrimImage(self.wand)\n if not result:\n self.raise_exception()",
"def _trim_margins(self, img):\n oldsize = (0, 0)\n while oldsize != img.shape: # while the size is changing\n oldsize = img.shape\n for i in range(4): # 4 times\n img = num.rot90(img) # rotate 90\n if num.std(img[0, :]) < self.trim_std: # if low std\n img = img[1:, :] # trim edge\n\n return img",
"def turn_squeeze_image_off(self):\n self.squeeze_image = False",
"def Compact(self, *args):\n return _BRepAlgo.BRepAlgo_Image_Compact(self, *args)",
"def obscure_image(image):\n size = image.size\n pixel_size = 9\n if size[0] < 9 or size[1] < 9:\n return image\n image_f = image.filter(ImageFilter.MaxFilter)\n image_s = image_f.resize((size[0] / pixel_size, size[1] / pixel_size), Image.NEAREST)\n image_l = image_s.resize((size[0], size[1]), Image.NEAREST)\n\n return image_l",
"def adjust(self, image):\n ...",
"def turn_squeeze_image_on(self):\n self.squeeze_image = True",
"def strip(self):\n result = library.MagickStripImage(self.wand)\n if not result:\n self.raise_exception()",
"def trim_image(image):\n bbox = image.getbbox()\n return image.crop(bbox)",
"def trim(im):\n \n bg = Image.new(im.mode, im.size, im.getpixel((0,0)))\n diff = ImageChops.difference(im, bg)\n diff = ImageChops.add(diff, diff, 2.0, -100)\n bbox = diff.getbbox()\n if bbox:\n return im.crop(bbox)",
"def small_image(self):\n pass",
"def process_image(im, border_size=5, im_size=50):\n\n\tim = im[border_size:-border_size, border_size:-border_size]\n\n\t\n\t'''for i in range(0,len(im)):\n\t\tfor j in range(0,len(im[i])):\n\t\t\tim[i][j] = 255 if im[i][j] > 64 else 0'''\n\t\t\t\t\n\tim = resize(im, (im_size, im_size))\n\n\treturn im",
"def clean(img):\n\n label_img = label(img, connectivity=2)\n props = sorted(regionprops(label_img), key=lambda x: x.area)\n clean = morphology.binary_closing(img)\n\n clean = morphology.remove_small_holes(clean)\n return morphology.remove_small_objects(clean,\n int(np.floor(props[-1].area) / 10), connectivity=2)",
"def process_image(image):\n image = resize(image)\n return image",
"def applyMorphologicalCleaning(self, image):",
"def remove_small_regions(img, size):\n img = morphology.remove_small_objects(img, size)\n img = morphology.remove_small_holes(img, size)\n return img",
"def preprocessImage(img):\n shape = img.shape\n img = img[math.floor(shape[0]/4) : shape[0] - 25, 0:shape[1]]\n img = cv2.resize(img, (img_columns, img_rows), interpolation = cv2.INTER_AREA)\n return img",
"def truncate(image_1):\r\n n = 60 #100\r\n i,j = image_1.shape\r\n image_2 = image_1[n:i-n,n:j-n]\r\n return image_2",
"def resize_image(image, vertical_seams_to_remove, horizontal_seams_to_remove):\n #convert image to grayscale\n print(\"-converting into grayscale...\")\n image_grayscale = convert_to_grayscale(image)\n #calculate image energy\n print(\"-calculating energy...\")\n image_energy = energy(image_grayscale)\n #remove given number of vertical seams\n for i in range(vertical_seams_to_remove):\n print(\"-removing vertical seam \" + str(i+1) + \"...\")\n image, image_energy = remove_vertical_seam(image, image_energy)\n #remove given number of horizontal seams\n for j in range(horizontal_seams_to_remove):\n print(\"-removing horizontal seam \" + str(j+1) + \"...\")\n image, image_energy = remove_horizontal_seam(image, image_energy)\n return image",
"def prepare_image(img):\n img = img.filter(ImageFilter.SMOOTH_MORE)\n img = img.filter(ImageFilter.SMOOTH_MORE)\n if 'L' != img.mode:\n img = img.convert('L')\n return img",
"def remove_padding(im, pad):\n\n return im[pad:-pad, pad:-pad]",
"def clean_windowerrors(self, amount=1.0-15/2e5, nskip=3):\r\n\r\n pts = range(2, self.imageData.shape[0], nskip)\r\n\r\n self.imageData[pts,:,:] = self.imageData[pts, :, :] * amount",
"def _trim_border(img):\n for i in range(img.shape[0]):\n if np.any(img[i, :, :] != 255):\n img = img[i:, :, :]\n break\n\n for i in range(img.shape[0] - 1, 0, -1):\n if np.any(img[i, :, :] != 255):\n img = img[: i + 1, :, :]\n break\n\n for i in range(img.shape[1]):\n if np.any(img[:, i, :] != 255):\n img = img[:, i:, :]\n break\n\n for i in range(img.shape[1] - 1, 0, -1):\n if np.any(img[:, i, :] != 255):\n img = img[:, : i + 1, :]\n break\n\n return img",
"def large_image(self):\n pass",
"def main():\r\n original = SimpleImage(\"images/poppy.png\")\r\n original.show()\r\n # shrink function\r\n after_shrink = shrink('images/poppy.png')\r\n after_shrink.show()",
"def get_squeeze_image(self):\n return self.squeeze_image",
"def resize_img(self,scale=1):\n reduced = self.image.reduce((scale,scale))\n reduced.save(\"../edited/{}\".format(self.image.filename))\n\n reduced = Image.open(\"../edited/{}\".format(self.image.filename))\n return reduced",
"def undo_normalise(img):\n\treturn img + CONFIG.MEAN_PIXEL",
"def square_image(img):\r\n x,y = img.size\r\n while y > x:\r\n #slice 10px at a time until square\r\n slice_height = min(y - x, 10)\r\n\r\n bottom = img.crop((0, y - slice_height, x, y))\r\n top = img.crop((0, 0, x, slice_height))\r\n\r\n #remove the slice with the least entropy\r\n if image_entropy(bottom) < image_entropy(top):\r\n img = img.crop((0, 0, x, y - slice_height))\r\n else:\r\n img = img.crop((0, slice_height, x, y))\r\n\r\n x,y = img.size\r\n\r\n return img",
"def check_image_size(self, x):\n _, _, h, w = x.size()\n mod_pad_h = (self.window_size -\n h % self.window_size) % self.window_size\n mod_pad_w = (self.window_size -\n w % self.window_size) % self.window_size\n x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')\n return x"
]
| [
"0.6564348",
"0.6462592",
"0.64356196",
"0.64125997",
"0.64088434",
"0.63586885",
"0.6321339",
"0.6209154",
"0.61482376",
"0.6099185",
"0.60608566",
"0.60138977",
"0.5960579",
"0.59480804",
"0.5920281",
"0.591606",
"0.59075433",
"0.5890236",
"0.5870133",
"0.57548124",
"0.57126665",
"0.56995004",
"0.56812716",
"0.56792086",
"0.5645712",
"0.5637221",
"0.56370246",
"0.5622208",
"0.5620511",
"0.56169826"
]
| 0.7336111 | 0 |
When creating a log, pass all essential information about this instance of the climb; the style will need to be corrected. | def __init__(self, date: dt_date, style: str, partners: list, notes: str, climb: Climb):
self._date = date
self._styles = {
'Lead RP': 'red point',
'AltLd O/S': 'onsight',
'Solo O/S': 'onsight',
'Lead rpt': 'no log',
'Lead O/S': 'onsight',
'2nd β': 'flash',
'Solo rpt': 'no log',
'Lead Flash': 'flash',
'Lead dog': 'no send',
'2nd O/S': 'onsight',
'AltLd rpt': 'no log',
'AltLd': 'no log',
'2nd': 'no log',
'Sent x': 'red point',
'Sent Flash': 'flash',
'-': 'summit',
'Solo': 'no log',
'Sent O/S': 'onsight',
'AltLd dnf': 'no send',
'Lead dnf': 'no send',
'DWS': 'no log',
'2nd rpt': 'no log',
'2nd dog': 'no send',
'AltLd dog': 'no send',
'Sent rpt': 'no log',
'Lead G/U': 'ground up',
'Sent': 'no log',
'Solo dnf': 'no send',
'Lead': 'no log'} # A matcher of different style types
self._style = self.match_style(style) # Correct the style for a more readable format
self._partners = partners
self._notes = notes
self._climb = climb | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, study_dir):\n super(LogfileStyleFormatter, self).__init__(\n fmt='%(levelname)s: %(file_indicator)s:'\n '%(line_indicator)s%(column_indicator)s'\n ' %(message)s%(cause_indicator)s')\n self.study_dir = study_dir\n self.previous_filename = None",
"def __init__(self, logarea):\n self.logarea = logarea\n if not logarea:\n return\n logarea.tag_config('info', foreground='green')\n logarea.tag_config('cmd', foreground='blue')\n logarea.tag_config('output', foreground='grey')\n logarea.tag_config('error', foreground='red')\n logarea.tag_config('alert', foreground='orange')",
"def __init__(self):\n s = \"{0}\\n{1:^150}\\n{0}\\n\".format(\"=\"*150, \"N E B I L A N D\")\n self.log(s)\n self.table_log(\"Iteration\", \"Datetime\",\n \"Event\", \"Entity Affected\", \"Extra Info\")\n self.log(\"-\"*150)",
"def __init__(self):\n fmt = \"%(message)s\"\n super().__init__(fmt=fmt)\n\n self.baseline = None\n self.cut = None\n self.manual_push = 0",
"def __init__(self, message):\n config.log.critical(\"%s\" % (message))",
"def climb(self):\n print(\"Inside WoodElf.climb\")",
"def log_message(self, formate, *args):\n return",
"def log_message(self, fmt, *args):\r\n pass\r\n # log_message\r",
"def format(self, record):\n message = super(ConsoleFormatter, self).format(record)\n color_code = self.color(self.log_colors, record.levelname)\n if hasattr(record, 'ctx'):\n metadata = record.ctx.invocation_metadata()\n for item in metadata:\n if item.key == 'author_name':\n setattr(record, 'user', item.value)\n elif item.key == 'correlation_id':\n setattr(record, 'correlationId', item.value)\n\n for key, value in record.__dict__.items():\n #this allows to have numeric keys\n if (key not in RESERVED_ATTR_HASH\n and not (hasattr(key, \"startswith\")\n and key.startswith('_'))):\n message = append(color_code=color_code, message=message, key=key, value=value)\n return message",
"def format(self, record):\n\n\t\t# Use copy.copy - c.f. https://stackoverflow.com/a/7961390\n\t\tcolored_record = copy.copy(record)\n\n\t\tcolor = None\n\t\ttry:\n\t\t\tcolor = record.color\n\t\texcept AttributeError as e:\n\t\t\tpass\n\n\t\tif color is not None:\n\t\t\tif color is None or not color or color == \"none\":\n\t\t\t\tpass\n\t\t\telif color == \"white\":\n\t\t\t\twhite = \"\\033[37m\"\n\t\t\t\tclear = \"\\033[0;0m\"\n\t\t\t\tcolored_record.msg = \"{0:s}{1:s}{2:s}\".format(\n\t\t\t\t\twhite,\n\t\t\t\t\tcolored_record.msg,\n\t\t\t\t\tclear,\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\traise WCMIError(\"error: ConsoleFilter: unrecognized color `{0:s}'.\".format(str(color)))\n\n\t\treturn super().format(colored_record)",
"def __init__(self, infos, logger):\n super().__init__(\n infos, \n 'Delete Green Cloudformation Stack', \n logger,\n infos.green_infos\n )",
"def __init__(self):\r\n self.escape = chr(27)\r\n self.baseVolume = 1.0\r\n self.colourRe = re.compile(r'(%s\\[((?:\\d;)?\\d{1,2})m)' % self.escape)\r\n encodeTest = lambda value: None if codecs.getencoder(value) else 'Traceback should be self explanitory'\r\n self.logEncoding = 'UTF-8'\r\n self.normalise = lambda value: unicodedata.normalize(self.config.get('entry', 'unicodeform'), unicode(value)).encode(self.config.get('entry', 'encoding'), 'ignore')\r\n self.soundOutput = output.Output() # Just keep this stored.\r\n self._outputThread = None # The output thread, to be checked at the start of _threadManager.\r\n self.logFile = 'Output %s.log' % strftime('%Y-%m-%d %H-%M-%S').replace(':', '-')\r\n self.write = lambda value: self.output(value) # For tracebacks and the like.\r\n self.colours = {\r\n '0': ('white', 'black'),\r\n '30': ('Black', None),\r\n '31': ('red', None),\r\n '32': ('green', None),\r\n '33': ('yellow', None),\r\n '34': ('blue', None),\r\n '35': ('purple', None),\r\n '36': ('cyan', None),\r\n '37': ('white', None),\r\n '39': ('white', None),\r\n '40': (None, 'black'),\r\n '41': (None, 'red'),\r\n '42': (None, 'green'),\r\n '43': (None, 'yellow'),\r\n '44': (None, 'blue'),\r\n '45': (None, 'purple'),\r\n '46': (None, 'cyan'),\r\n '47': (None, 'white'),\r\n '49': (None, 'black')\r\n }\r\n self.styles = {\r\n '1': ('bold', True),\r\n '3': ('italics', True),\r\n '4': ('underline', True),\r\n '5': ('blinking', True),\r\n '9': ('strikethrough', True),\r\n '22': ('bold', False),\r\n '23': ('italics', False),\r\n '24': ('underline', False),\r\n '25': ('blinking', False),\r\n '29': ('strikethrough', False)\r\n }\r\n (fg, bg) = self.colours['0']\r\n self._fg = fg\r\n self._bg = bg\r\n self.commandQueue = deque() # Queue for self._send.\r\n self._commandInterval = 0.0 # Time since last command was executed.\r\n self.basicTypes = [\r\n str,\r\n dict,\r\n list,\r\n tuple,\r\n unicode,\r\n int,\r\n long,\r\n float,\r\n bool,\r\n NoneType\r\n ]\r\n self.random = Random()\r\n self.soundRe = re.compile(r'(.*)([*]\\d+)(.*)')\r\n self.codeRe = re.compile(r'^\\n( +)', re.M)\r\n self.variableRe = re.compile(r'(@([a-zA-Z]\\w+))')\r\n self.statementRe = re.compile(r'(@\\{([^}]+)\\})')\r\n self._filename = '' # The property where the filename will be stored (fill it with self.load).\r\n self.environment = {\r\n 'world': self,\r\n 'accessibility': accessibility,\r\n 're': re,\r\n }\r\n self._log = [] # The complete log of the session, containing both input and output.\r\n self._logIndex = 0 # The entry of the last entry which was written to disk.\r\n self._output = [] # The text which should be in the output window.\r\n self.outputBuffer = None # Must contain a .write method.\r\n self.errorBuffer = ErrorBuffer(self.output, [], {'process': False}) # Make errors beep.\r\n self.outputSub = None\r\n self.logSub = None # The line to write to the log in place of the actual line.\r\n self._gag = {\r\n 'entry': 0,\r\n 'output': 0,\r\n 'voice': 0,\r\n 'braille': 0\r\n }\r\n self.con = None # The Telnet connection.\r\n self.invalidPort = lambda value: None if (value >= 1 and value <= 65535) else errors['InvalidPort']\r\n self.onSend = lambda: None\r\n self.onOutput = lambda: None\r\n self.onBeep = lambda: self.errorBuffer.beepFunc()\r\n self.onSave = lambda: None # The method to be called when the world is saved.\r\n self.onOpen = lambda: None # The method to be called when the connection is opened.\r\n self.onConnected = lambda: None # The method to be called when the connection is established.\r\n 
self.onClose = lambda: None # The method to be called when the connection is closed.\r\n self.onError = lambda error = None: self.errorBuffer.write(error)\r\n self.onTrigger = lambda: None\r\n self.onAlias = lambda: None\r\n # Create default configuration, and let self.load override it if the user wants:\r\n self.defaultConfig = {\r\n 'config': {},\r\n 'aliases': {},\r\n 'triggers': {},\r\n 'variables': {}\r\n }\r\n self.config = ConfManager('World properties')\r\n self.config.add_section('world')\r\n self.config.set('world', 'name', 'Untitled World', title = 'The name of the world', validate = lambda value: None if value else 'World names cannot be blank')\r\n self.config.set('world', 'username', '', title = 'The username to use for this world')\r\n self.config.set('world', 'password', '', title = 'The password for this world (saves in plain text)', kwargs = {'style': wx.TE_PASSWORD})\r\n self.config.set('world', 'notes', '', title = 'Notes for this world', kwargs = {'style': wx.TE_RICH|wx.TE_MULTILINE})\r\n self.config.set('world', 'autosave', True, title = 'Save this world automatically when closed')\r\n self.config.add_section('connection')\r\n self.config.set('connection', 'hostname', '', title = 'The hostname of the world', validate = lambda value: None if value else 'Without a hostname, your world will not be able to connect.')\r\n self.config.set('connection', 'port', 0, title = 'The port to use for this world', validate = self.invalidPort)\r\n self.config.set('connection', 'autoconnect', True, title = 'Automatically connect this world when it opens')\r\n self.config.set('connection', 'connectstring', '', title = 'The connection string, using {u} for username and {p} for password, and seperating the commands with your command seperator')\r\n self.config.add_section('entry')\r\n self.config.set('entry', 'commandchar', '/', validate = lambda value: None if (not re.match('[A-Za-z0-9]', value) and len(value) == 1) else 'The command character can not be a letter or a number, and must be only one character.', title = 'The character to make commands get executed by the client rather than being sent straight to the mud')\r\n self.config.set('entry', 'helpchar', '?', title = 'The command to indicate you need help on something (clear to disable)')\r\n self.config.set('entry', 'commandsep', ';', title = 'The character which seperates multiple commands', validate = lambda value: None if len(value) == 1 else 'Commands must be seperated by only one character')\r\n self.config.set('entry', 'commandinterval', 0.0, title = 'The time between sending batched commands', kwargs = {'min_val': 0.0, 'increment': 0.1, 'digits': 2})\r\n self.config.set('entry', 'echocommands', True, title = 'Echo commands to the output window')\r\n self.config.set('entry', 'logduplicatecommands', False, title = 'Log duplicate commands')\r\n self.config.set('entry', 'processaliases', True, title = 'Process aliases')\r\n self.config.set('entry', 'simple', False, title = 'When adding new aliases and triggers, send their code directly to the game after replacing arguments instead of executing them (can be changed by setting the simple flag)')\r\n self.config.set('entry', 'prompt', 'Entry', title = 'Prompt')\r\n self.config.set('entry', 'escapeclearsentry', True, title = 'Clear the entry line when the escape key is pressed')\r\n self.config.set('entry', 'unicodeform', 'NFKD', title = 'The unicode normalize form', validate = lambda value: None if value in ['NFC', 'NFD', 'NFKC', 'NFKD'] else 'Form must be a valid form for 
unicodedata.normalize.')\r\n self.config.set('entry', 'encoding', 'ascii', title = 'Text encoding for commands sent to the server', validate = encodeTest)\r\n self.config.add_section('output')\r\n self.config.set('output', 'suppressblanklines', True, title = 'Suppress blank lines in the output window')\r\n self.config.set('output', 'gag', False, title = 'Gag all output')\r\n self.config.set('output', 'processtriggers', True, title = 'Process triggers')\r\n self.config.set('output', 'printtriggers', False, title = 'Print the titles or regular expressions of matched triggers to the output window (useful for debugging)')\r\n self.config.set('output', 'printunrecognisedformatters', False, title = 'Print unrecognised formatters to the output window')\r\n self.config.add_section('accessibility')\r\n self.config.set('accessibility', 'speak', True, title = 'Speak output')\r\n self.config.set('accessibility', 'braille', True, title = 'Braille output (if supported)')\r\n self.config.set('accessibility', 'outputscroll', True, title = 'Allow output window scrolling')\r\n self.config.set('accessibility', 'printcolours', False, title = 'Print ANSI formatters in the output window')\r\n self.config.add_section('logging')\r\n self.config.set('logging', 'logdirectory', '', title = 'Directory to store world log files', validate = lambda value: None if (not value or os.path.isdir(os.path.abspath(os.path.join(self.directory, value)))) else 'Directory must exist. If you do not want logging, leave this field blank.', control = DirBrowseButton)\r\n self.config.set('logging', 'loginterval', 50, title = 'After how many lines should the log be dumped to disk', validate = lambda value: None if value > 10 else 'At least 10 lines must seperate dump opperations.')\r\n self.config.set('logging', 'logencoding', 'UTF-8', title = 'The encoding for log files', validate = encodeTest)\r\n self.config.add_section('sounds')\r\n self.config.set('sounds', 'mastermute', False, title = 'Mute sounds')\r\n self.config.set('sounds', 'mastervolume', 75, title = 'Master volume', validate = lambda value: None if value >= 0 and value <= 100 else 'Volume must be between 0 and 100.')\r\n self.config.add_section('scripting')\r\n self.config.set('scripting', 'enable', True, title = 'Enable scripting')\r\n self.config.set('scripting', 'expandvariables', True, title = 'Expand variables on the command line')\r\n self.config.set('scripting', 'variablere', self.variableRe.pattern, title = 'The regular expression variable declarations on the command line must conform too', validate = lambda value: setattr(self, 'variableRe', re.compile(value)))\r\n self.config.set('scripting', 'expandstatements', True, title = 'Expand statements on the command line')\r\n self.config.set('scripting', 'statementre', self.statementRe.pattern, title = 'The regular expression statements entered on the command line must conform too', validate = lambda value: setattr(self, 'statementRe', re.compile(value)))\r\n self.config.set('scripting', 'startfile', '', title = 'The main script file', control = FileBrowseButton, validate = lambda value: None if not value or os.path.isfile(os.path.abspath(os.path.join(self.directory, value))) else 'This field must either be blank, or contain the path to a script file.')\r\n self.config.set('scripting', 'bypasscharacter', '>', title = 'The command to bypass scripting on the command line')\r\n self.config.add_section('saving')\r\n self.config.set('saving', 'aliases', False, title = 'Save aliases in the world file')\r\n 
self.config.set('saving', 'triggers', False, title = 'Save triggers in the world file')\r\n self.config.set('saving', 'variables', True, title = 'Save variables in the world file')",
"def makeLogC():\n counter = 0\n for each in nuke.selectedNodes():\n each[\"colorspace\"].setValue(\"AlexaV3LogC\")\n counter += 1\n\n print \"## changed colourspace for %d nodes\" % counter",
"def update_format(self, record):\n prefix = \"\\u001b[\"\n color = f\"{prefix}{self.color_map[record.levelno]}m\"\n bold = f\"{prefix}1m\"\n gray = f\"{prefix}1m{prefix}30m\"\n reset = f\"{prefix}0m\"\n self._style._fmt = (\n f\"%(asctime)s\"\n f\" {gray}│{reset} {color}%(levelname)-8s{reset} {gray}│{reset} \"\n )\n if hasattr(record, \"function\"):\n self._style._fmt += (\n f\"{gray}%(indent)s{reset}\"\n f\"{bold}%(function)s{reset}{gray}:{reset}\"\n \" %(message)s\"\n )\n else:\n self._style._fmt += \"%(indent)s%(message)s\"",
"def prepare(self, record: LogRecord):\n # The format operation gets traceback text into record.exc_text\n # (if there's exception data), and also returns the formatted\n # message. We can then use this to replace the original\n # msg + args, as these might be unpickleable. We also zap the\n # exc_info and exc_text attributes, as they are no longer\n # needed and, if not None, will typically not be pickleable.\n\n # Not nedded, since we use tblib\n # msg = self.format(record)\n # # bpo-35726: make copy of record to avoid affecting other handlers in the chain.\n # record = copy.copy(record)\n # record.message = msg\n # record.msg = msg\n # record.args = None\n # record.exc_info = None\n # record.exc_text = None\n return ['log_msg', record]",
"def look_log(self):\n Child(self.model.look_log())",
"def __init__(self, margin):\n super(ContrastiveLoss, self).__init__()\n self.margin = margin",
"def __init__(self, function, argument):\n config.log.critical(\"Error: A bad operation was sent to act(), or act() was unable\")\n config.log.critical(\" to complete the action.\")\n config.log.critical(\" Bad code: %s\" % argument)",
"def __init__(self, color: str, smell: str):\n self.color = color\n self.smell = smell",
"def log(self, message):",
"def compose_logfile_lines(start_time, db_format_time, blast_time, option_lines,\r\n formatdb_cmd, blast_results, options, all_ids,\r\n hit_ids, removed_hit_ids,\r\n included_ids, DEBUG):\r\n\r\n log_lines = []\r\n log_lines.append(\"Sequence exclusion analysis run on %s\" % strftime(\"%c\"))\r\n log_lines.append(\r\n \"Formatting subject database took %2.f seconds\" %\r\n (db_format_time))\r\n log_lines.append(\r\n \"BLAST search took %2.f minute(s)\" %\r\n ((blast_time) / 60.0))\r\n log_lines.append(\r\n \"Total analysis completed in %2.f minute(s)\" %\r\n ((time() - start_time) / 60.0))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Options |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.extend(option_lines)\r\n log_lines.append(\"Subject database formatted with command: %s\"\r\n % formatdb_cmd)\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Results |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\"BLAST results above e-value threshold:\")\r\n log_lines.append(\r\n \"\\t\".join([\"Query id\", \"Subject id\", \"percent identity\", \"alignment length\",\r\n \"mismatches\", \"gap openings\", \"q. start\", \"q. end\", \"s. start\", \"s. end\", \"e-value\", \"bit score\"]))\r\n\r\n for line in blast_results:\r\n if line.startswith(\"#\"):\r\n continue\r\n else:\r\n log_lines.append(line)\r\n\r\n log_lines.append(\r\n \"Hits matching e-value and percent alignment filter: %s\" %\r\n ','.join(sorted(hit_ids)))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Summary |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\"Input query sequences: %i\" % len(all_ids))\r\n log_lines.append(\r\n \"Query hits from BLAST: %i\" %\r\n (len(hit_ids) + len(removed_hit_ids)))\r\n log_lines.append(\r\n \"Query hits from BLAST lacking minimal percent alignment: %i\" %\r\n len(removed_hit_ids))\r\n log_lines.append(\"Final hits: %i\" % len(hit_ids))\r\n log_lines.append(\"Output screened sequences: %i\" % len(included_ids))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Output |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\r\n \"Writing excluded sequences (hits matching filters) to: %s\" %\r\n join(options.outputdir, \"matching.fna\"))\r\n log_lines.append(\r\n \"Writing screened sequences (excluding hits matching filters) to: %s\" %\r\n join(options.outputdir, \"non-matching.fna\"))\r\n log_lines.append(\r\n \"Writing raw BLAST results to: %s\" %\r\n join(options.outputdir, 'raw_blast_results.txt'))\r\n\r\n # format for printing\r\n revised_log_lines = []\r\n for line in log_lines:\r\n line = line + \"\\n\"\r\n revised_log_lines.append(line)\r\n\r\n if DEBUG:\r\n for line in log_lines:\r\n print line\r\n\r\n return revised_log_lines",
"def format(self, record):\n\n\n if not hasattr(record, 'filename_'):\n record.file_indicator = '-'\n else:\n record.file_indicator = os.path.relpath(record.filename_.strip(),\n self.study_dir)\n record.line_indicator = self.format_aggregated(\n record,\n 'line_number',\n ' line %d:',\n ' lines [%s]:',\n optional=True)\n record.column_indicator = self.format_aggregated(\n record,\n 'column_number',\n ' column %d:',\n ' columns [%s]:',\n optional=True)\n record.cause_indicator = self.format_aggregated(\n record,\n 'cause',\n \"; value encountered: '%s'\",\n \"; values encountered: ['%s']\",\n join_string=\"', '\",\n optional=True)\n\n # format the string based on these fields\n formatted_result = super(LogfileStyleFormatter, self).format(record)\n\n # prepend an empty line if the filename is different than before\n current_filename = getattr(record, 'filename_', '')\n if (self.previous_filename is not None and\n current_filename != self.previous_filename):\n formatted_result = '\\n' + formatted_result\n self.previous_filename = current_filename\n\n return formatted_result",
"def __str__(self) -> str:\n if self.write_back is black.WriteBack.CHECK:\n reformatted = \"would be reformatted\"\n unchanged = \"would be left unchanged\"\n failed = \"would fail to reformat\"\n cleared = \"would be cleared\"\n else:\n reformatted = \"reformatted\"\n unchanged = \"left unchanged\"\n failed = \"failed to reformat\"\n cleared = \"cleared\"\n report = []\n if self.change_count:\n s = \"s\" if self.change_count > 1 else \"\"\n report.append(\n click.style(\n f\"{self.change_count} cell{s} {reformatted}\", bold=True\n )\n )\n if self.same_count:\n s = \"s\" if self.same_count > 1 else \"\"\n report.append(f\"{self.same_count} cell{s} {unchanged}\")\n if self.failure_count:\n s = \"s\" if self.failure_count > 1 else \"\"\n report.append(\n click.style(f\"{self.failure_count} cell{s} {failed}\", fg=\"red\")\n )\n if self.output_change_count:\n s = \"s\" if self.change_count > 1 else \"\"\n report.append(\n click.style(\n f\"{self.output_change_count} output{s} {cleared}\",\n bold=True,\n )\n )\n if self.output_same_count:\n s = \"s\" if self.same_count > 1 else \"\"\n report.append(f\"{self.output_same_count} output{s} {unchanged}\")\n return \", \".join(report) + \".\"",
"def log_message(self, format, *args):",
"def __init__(self, bb_log_path):\r\n self.bb_log_path = bb_log_path\r\n pass",
"def set_style(self):",
"def _processLine(self, line):\r\n actual = []\r\n i = 0 # Where we're at in the list.\r\n for chunk in re.split(self.colourRe, line):\r\n if not i: # Chunk is to be printed.\r\n actual.append(chunk)\r\n elif i == 1: #This is the colour string to be replaced.\r\n line = line.replace(chunk, '')\r\n elif i == 2: # This is the bit which tells us which colour is needed.\r\n i = -1 # Increment will set it to 0.\r\n pc = self.config.get('accessibility', 'printcolours')\r\n for c in chunk.split(';'):\r\n if c == '0': # Reset!\r\n (fg, bg) = self.colours['0']\r\n actual.append(StyleObject(foreground = fg, background = bg, bold = False, italics = False, underline = False, strikethrough = False, blink = False))\r\n if pc:\r\n actual.append('<reset>')\r\n elif c in self.colours: # Found the colour.\r\n (fg, bg) = self.colours[c]\r\n text = ''\r\n if fg:\r\n self._fg = fg\r\n text = '%s text' % fg\r\n if bg:\r\n self._bg = bg\r\n text += '%s%s background' % (' on a ' if text else '', bg)\r\n actual.append(StyleObject(foreground = fg, background = bg))\r\n if pc: # Print colours to the output window.\r\n actual.append('<%s>' % text)\r\n elif chunk in ['7', '27']: # Inverse on and off...\r\n (fg, bg) = (self._fg, self._bg)\r\n actual.append(StyleObject(foreground = bg, background = fg))\r\n if pc:\r\n actual.append('<%s>' % 'inverse' if chunk == '7' else '/inverse')\r\n elif chunk in self.styles:\r\n s, v = self.styles[chunk]\r\n o = StyleObject()\r\n setattr(o, s, v)\r\n actual.append(o)\r\n if pc:\r\n actual.append('<%s%s>' % ('' if v else '/', s))\r\n else:\r\n if self.config.get('output', 'printunrecognisedformatters'):\r\n actual.append('<Unrecognised: %s>' % chunk)\r\n i += 1\r\n return (line, actual)",
"def __init__(self, log=False):\n self.log = log",
"def __init__(self, message=\"\", log_message=\"\", **details):\n\n # Call the base class constructor with the parameters it needs\n super().__init__(message)\n\n # Set attributes\n self.log_message = log_message\n self.details = details",
"def __init__(self, message=\"\", log_message=\"\", **details):\n\n # Call the base class constructor with the parameters it needs\n super().__init__(message)\n\n # Set attributes\n self.log_message = log_message\n self.details = details"
]
| [
"0.57048947",
"0.55396056",
"0.5465881",
"0.54398954",
"0.541957",
"0.54109883",
"0.5315492",
"0.5292643",
"0.5258789",
"0.5210646",
"0.51873815",
"0.5179873",
"0.51721483",
"0.51535904",
"0.5147601",
"0.50989956",
"0.5095159",
"0.5091493",
"0.50643826",
"0.50538623",
"0.5044817",
"0.5034723",
"0.50336295",
"0.5032267",
"0.5015027",
"0.499207",
"0.49907118",
"0.49873585",
"0.49840772",
"0.49840772"
]
| 0.59754187 | 0 |
If the found style is not in the dictionary of styles, then add the style to the dictionary. | def add_style_to_styles(self, style_key: str, style_value: str):
self._styles[style_key] = style_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_style(self, style, keepdefault=False):\n for key, styledict in style.items():\n target = self.style[key]\n for k, v in styledict.items():\n if keepdefault:\n target.setdefault(k, v)\n else:\n target[k] = v\n return self",
"def _combine_styles(*styles):\n computed_style = {}\n for style in styles:\n if style is not None:\n computed_style.update(style)\n return computed_style",
"def styles(self, styles):\n # each cell owns it's own copy of the styles\n self._styles = {} if styles is None else styles.copy()",
"def _require_style(style):\n if not isinstance(style, (dict, type(None))):\n raise ValueError(\"Expected a dictionary of CSS styles or None, received %s.\" % style)\n return style",
"def update_known_styles_state(app: sphinx.application.Sphinx) -> None:\n global _KNOWN_STYLES_IN_USE\n\n _KNOWN_STYLES_IN_USE = {\n \"light\": _get_light_style(app),\n \"dark\": _get_dark_style(app),\n }",
"def style(style_def):\n if not style_def:\n return {}\n if isinstance(style_def, dict):\n return style_def\n colors = {\"yellow\", \"magenta\", \"green\", \"cyan\", \"blue\", \"red\", \"black\", \"white\"}\n text_styles = {\"bold\", \"underline\", \"dim\", \"reverse\", \"italic\"}\n style = {}\n foreground = True\n style_set = True\n for s in style_def.split(\" \"):\n if s == \"on\":\n foreground = False\n elif s == \"not\":\n style_set = False\n elif s in colors:\n style[\"fg\" if foreground else \"bg\"] = s\n elif s in text_styles:\n style[s] = style_set\n else:\n raise ValueError(\"unknown style '{}'\".format(s))\n return style",
"def _set_style(style):\n if isinstance(style, (str, dict)):\n return Style(style)\n elif isinstance(style, Style):\n return style\n else:\n return Style()",
"def match_style(self, input_style: str) -> str:\r\n try: # Try to get from the dictionary\r\n return self.get_style_from_styles(input_style)\r\n except KeyError: # If you get a key error, it is not in the dictionary\r\n new_style = input(input_style + '\\nWhat style is this?') # Ask the user what style it is\r\n self.add_style_to_styles(input_style, new_style) # Add this style to the dictionary\r\n return new_style # Return the more readable style\r",
"def loadstyle(style_name):\n\n style = {}\n nwc_styles = {} # for backwards compatibility\n style_file = os.path.join(HERE, '..', 'rc', style_name)\n try:\n # Check rc directory for built in styles first\n rc_file(style_file)\n except FileNotFoundError:\n # Check current working dir or path\n style_file = style_name\n try:\n rc_file(style_file)\n except FileNotFoundError as err:\n raise StyleNotFoundError(f\"No such style file found: {err}\")\n style = rcParams.copy()\n\n # The style files may also contain an extra section with typography\n # for titles and captions (these can only be separately styled in code,\n # as of Matplotlib 2.2)\n # This is a hack, but it's nice to have all styling in one file\n # The extra styling is prefixed with `#!`\n with open(style_file, 'r') as file_:\n doc = file_.readlines()\n rc_params_newsworthy = \"\\n\".join([d[2:]\n for d in doc if d.startswith(\"#!\")])\n rc_params_newsworthy = yaml.safe_load(rc_params_newsworthy)\n ###\n # Typography\n ###\n if \"title_font\" in rc_params_newsworthy:\n nwc_styles[\"title_font\"] = [\n x.strip() for x in rc_params_newsworthy[\"title_font\"].split(\",\")\n ]\n else:\n nwc_styles[\"title_font\"] = style[\"font.family\"]\n\n # define as pt or reltive (\"smaller\")\n nwc_styles[\"subtitle.fontsize\"] = rc_params_newsworthy.get(\n \"subtitle.fontsize\",\n None,\n )\n\n # make annotation same font size as ticks by default\n tick_font_size = style.get('xtick.labelsize', \"smaller\")\n nwc_styles[\"annotation.fontsize\"] = rc_params_newsworthy.get(\n \"annotation.fontsize\",\n tick_font_size,\n )\n nwc_styles[\"note.fontsize\"] = rc_params_newsworthy.get(\n \"note.fontsize\",\n \"smaller\",\n )\n nwc_styles[\"caption.fontsize\"] = rc_params_newsworthy.get(\n \"caption.fontsize\",\n \"smaller\",\n )\n\n color = rc_params_newsworthy.get(\"neutral_color\",\n rcParams[\"figure.edgecolor\"])\n black_color = rc_params_newsworthy.get(\"black_color\", BLACK)\n dark_gray_color = rc_params_newsworthy.get(\"dark_gray_color\", DARK_GRAY)\n light_gray_color = rc_params_newsworthy.get(\"light_gray_color\", LIGHT_GRAY)\n strong_color = rc_params_newsworthy.get(\"strong_color\", color)\n positive_color = rc_params_newsworthy.get(\"positive_color\", POSITIVE)\n negative_color = rc_params_newsworthy.get(\"negative_color\", NEGATIVE)\n warm_color = rc_params_newsworthy.get(\"warm_color\", WARM)\n cold_color = rc_params_newsworthy.get(\"cold_color\", COLD)\n fill_between_color = rc_params_newsworthy.get(\"fill_between_color\", FILL_BETWEEN)\n fill_between_alpha = rc_params_newsworthy.get(\"fill_between_alpha\", 0.5)\n nwc_styles[\"black_color\"] = to_rgba(\"#\" + str(black_color), 1)\n nwc_styles[\"dark_gray_color\"] = to_rgba(\"#\" + str(dark_gray_color), 1)\n nwc_styles[\"light_gray_color\"] = to_rgba(\"#\" + str(light_gray_color), 1)\n nwc_styles[\"neutral_color\"] = to_rgba(\"#\" + str(color), 1)\n nwc_styles[\"strong_color\"] = to_rgba(\"#\" + str(strong_color), 1)\n nwc_styles[\"positive_color\"] = to_rgba(\"#\" + positive_color, 1)\n nwc_styles[\"negative_color\"] = to_rgba(\"#\" + negative_color, 1)\n nwc_styles[\"warm_color\"] = to_rgba(\"#\" + warm_color, 1)\n nwc_styles[\"cold_color\"] = to_rgba(\"#\" + cold_color, 1)\n nwc_styles[\"fill_between_color\"] = to_rgba(\"#\" + str(fill_between_color), 1)\n nwc_styles[\"fill_between_alpha\"] = float(fill_between_alpha)\n\n if \"qualitative_colors\" in rc_params_newsworthy:\n nwc_styles[\"qualitative_colors\"] = [\n to_rgba(\"#\" + c.strip(), 1)\n for c in 
rc_params_newsworthy[\"qualitative_colors\"].split(\",\")\n ]\n\n else:\n nwc_styles[\"qualitative_colors\"] = [to_rgba(\"#\" + c, 1) for c in QUALITATIVE]\n if \"logo\" in rc_params_newsworthy:\n nwc_styles[\"logo\"] = rc_params_newsworthy[\"logo\"]\n\n return style, nwc_styles",
"def style(self, style):\n self.style += [ style ]\n return self",
"def style_exists(stylename, u):\n stat, ds_request = u.request(method = 'GET',\n path = 'rest/styles/' + \\\n stylename + '.json',\n payload = None,\n mime = 'application/json')\n return stat == 200",
"def apply_styles(graph, styles):\n graph.graph_attr.update(\n ('graph' in styles and styles['graph']) or {}\n )\n graph.node_attr.update(\n ('nodes' in styles and styles['nodes']) or {}\n )\n graph.edge_attr.update(\n ('edges' in styles and styles['edges']) or {}\n )\n return graph",
"def style(self, style):\n\n self.container['style'] = style",
"def test_style_dict():\n style = css.StyleDict({\n 'margin_left': 12,\n 'display': 'block'})\n assert style.display == 'block'\n assert style.margin_left == 12\n with raises(KeyError):\n style.position # pylint: disable=W0104",
"def on_add_clicked(self, obj):\n style = self.sheetlist.get_style_sheet(\"default\")\n StyleEditor(_(\"New Style\"), style, self)",
"def embed_styles(self):\n for style in self.book.xpath(\"//link[@rel='stylesheet']\"):\n style_raw = self.get_remote_content(style.attrib[\"href\"])\n if style_raw != None:\n style_content = style_raw.decode(\"utf-8\")\n new_style = html.Element(\"style\")\n new_style.attrib[\"type\"] = \"text/css\"\n new_style.text = style_content \n style.xpath(\"//head\")[0].insert(0, new_style)\n style.getparent().remove(style)",
"def cvr_to_styles_dict(argsdict: dict, cvr_df: pd.DataFrame) -> dict:\n start = datetime.datetime.utcnow()\n utils.sts(\"Searching CVR chunk for styles...\", 3)\n\n if 'Ballot Style' in list(cvr_df.columns):\n # in Dane County case, \"Ballot Style\" column contains strings like 'Ballot Style NNN'\n # convert 'Ballot Style NNN' to 'NNN' (must be a string for use as key in JSON)\n cvr_df['Ballot Style'] = cvr_df['Ballot Style'].apply(lambda x: re.sub(r'^\\D+', '', x))\n else:\n # no 'Ballot Style' column\n style_from_precinct_regex = argsdict.get('style_from_precinct_regex', '')\n if style_from_precinct_regex:\n precinct_list = cvr_df['Precinct']\n style_list = []\n for precinct in precinct_list:\n style = re.search(style_from_precinct_regex, precinct)[1]\n style_list.append(style)\n cvr_df.insert(2, 'Ballot Style', style_list)\n else:\n utils.sts(\"No 'Ballot Style' column and no 'style_from_precinct_regex' was provided.\", 0)\n sys.exit(1)\n \n filtered_data = drop_unused_columns(cvr_df)\n unique_ballot_styles = filtered_data['Ballot Style'].unique()\n utils.sts(f\"Found {len(unique_ballot_styles)} unique style(s).\\nMapping Contests per style...\", 3)\n style_dict = get_contests_per_style(filtered_data, unique_ballot_styles)\n \n end = datetime.datetime.utcnow()\n time_taken = utils.show_time((end - start).total_seconds())\n utils.sts(f\"Processed {len(filtered_data)} rows in {time_taken}\", 3)\n\n return style_dict",
"def add_style(self, style_id, icon_href):\r\n doc = xml.dom.minidom.Document()\r\n style = doc.createElement('Style')\r\n style.setAttribute('id', style_id)\r\n doc.appendChild(style)\r\n icon_style = doc.createElement('IconStyle')\r\n style.appendChild(icon_style)\r\n icon = doc.createElement('Icon')\r\n icon_style.appendChild(icon)\r\n href = doc.createElement('href')\r\n icon.appendChild(href)\r\n href_text = doc.createTextNode(icon_href)\r\n href.appendChild(href_text)\r\n self.append(doc)",
"def _css(self, style):\n self._anonymous_css.append((self._order, style))\n self._order += 1",
"def update_style(self):\n pass",
"def validate_style(style):\n valid = {}\n for k, v in style.items():\n if (v.startswith('#') and all([d in hexdigits for d in v[1:]])):\n valid[k] = v\n return valid",
"def UpdateBaseStyles(self):\n super(EditraBaseStc, self).UpdateBaseStyles()\n\n # Set control specific styles\n sback = self.GetItemByName('select_style')\n if not sback.IsNull():\n sback = sback.GetBack()\n else:\n sback = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)\n self.VertEdit.SetBlockColor(sback)\n self.DefineMarkers()",
"def add_to_format(existing_format, dict_of_properties, workbook):\n new_dict={}\n for key, value in existing_format.__dict__.iteritems():\n if (value != 0) and (value != {}) and (value != None):\n new_dict[key]=value\n del new_dict['escapes']\n\n return(workbook.add_format(dict(new_dict.items() + dict_of_properties.items())))",
"def add_style(self, strstyle, content=\"\"):\n if content: # str is name of css file to use\n src = self.add_style_str(content, strstyle)\n else: # str is filename of actual css file\n src = self.add_style_file(strstyle)\n\n self.opf.add_manifest(sluggify(src), src, \"text/css\")",
"def _read_stylesheet(self, style):\n tree = ET.parse(style)\n for marker in tree.findall('style'):\n if marker.get('publishable') == 'true':\n self.publishable.add(marker.get('id'))",
"def load_default_style(self):\n self._css_shape = {\n \"point\": {\"color\": (255,0,0), \"paint\": fshape.FILL, \"z-index\":0},\n \"line\": {\"color\": (0,255,0), \"paint\": fshape.STROKE, \"z-index\":0},\n \"area\": {\"color\": (0,0,255), \"paint\": fshape.FILL, \"z-index\":0},\n \"text\": {\"color\": (0,0,0), \"angle\":0, \"paint\": fshape.FILL, \"z-index\":0}\n }\n \n # jeigu simbolis yra nurodytas, tai cia jo stiliaus aprasymas\n self._css_symbol = {\n \"graphics\": {\"z-index\":1000, \"color\": (255,0,0), \"line-width\":0.12} # ocad simboliams kurie yra paversti i grafika\n #\"901_1\": {\"name\":\"Road\", \"color\": (204, 204, 204)}\n }",
"def get_styles(u):\n stat, ds_request = u.request(method = 'GET',\n path = 'rest/styles.json',\n payload = None,\n mime = 'application/json')\n json_data = json.loads(ds_request)\n if json_data.get('styles') == '':\n return None\n styles = json_data.get('styles').get('style')\n\n out = {}\n for style in styles:\n out[style.get('name')] = {'href': style.get('href')}\n return out",
"def apply_styles(source, styles):\n soup = BeautifulSoup(source)\n\n for style in styles:\n for markup in soup.findAll(style.markup):\n markup['style'] = style.style.strip()\n\n return soup.prettify()",
"def add_style(self, style_family, style_name, properties):\n style = odf_create_style (\n style_family,\n name=style_name)\n for elem in properties:\n style.set_properties(properties=elem[1], area=elem[0])\n self.document.insert_style(style, automatic=True)",
"def update(self):\n for stylesheet_path, widgets in self._widget_sheet_map.iteritems():\n with open(stylesheet_path, \"r\") as fid:\n raw_stylesheet = fid.read()\n \n for widget in widgets:\n widget.setStyleSheet(raw_stylesheet)"
]
| [
"0.7267755",
"0.64935213",
"0.6178218",
"0.61260563",
"0.6113688",
"0.61129606",
"0.60676706",
"0.58556426",
"0.5807858",
"0.56835294",
"0.56551135",
"0.55858433",
"0.5543673",
"0.5537918",
"0.5495871",
"0.54484546",
"0.54361624",
"0.54099387",
"0.5384072",
"0.53600764",
"0.53456485",
"0.5343169",
"0.53214407",
"0.5311711",
"0.52988726",
"0.5295967",
"0.5253111",
"0.524582",
"0.52416104",
"0.518204"
]
| 0.6535367 | 1 |
Correct the UKC style to a more readable style, using the already created dictionary of styles. If the style isn't in the dictionary, ask the user what style it is, then add it. | def match_style(self, input_style: str) -> str:
try: # Try to get from the dictionary
return self.get_style_from_styles(input_style)
except KeyError: # If you get a key error, it is not in the dictionary
new_style = input(input_style + '\nWhat style is this?') # Ask the user what style it is
self.add_style_to_styles(input_style, new_style) # Add this style to the dictionary
return new_style # Return the more readable style
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_style(self, style, keepdefault=False):\n for key, styledict in style.items():\n target = self.style[key]\n for k, v in styledict.items():\n if keepdefault:\n target.setdefault(k, v)\n else:\n target[k] = v\n return self",
"def _set_style(style):\n if isinstance(style, (str, dict)):\n return Style(style)\n elif isinstance(style, Style):\n return style\n else:\n return Style()",
"def style(style_def):\n if not style_def:\n return {}\n if isinstance(style_def, dict):\n return style_def\n colors = {\"yellow\", \"magenta\", \"green\", \"cyan\", \"blue\", \"red\", \"black\", \"white\"}\n text_styles = {\"bold\", \"underline\", \"dim\", \"reverse\", \"italic\"}\n style = {}\n foreground = True\n style_set = True\n for s in style_def.split(\" \"):\n if s == \"on\":\n foreground = False\n elif s == \"not\":\n style_set = False\n elif s in colors:\n style[\"fg\" if foreground else \"bg\"] = s\n elif s in text_styles:\n style[s] = style_set\n else:\n raise ValueError(\"unknown style '{}'\".format(s))\n return style",
"def chooseStyle(c):\n if c == None: return None\n c = c[:3]\n styledict = {}\n styledict['alt'] = \"alpha\"\n if c in styledict:\n return styledict[c]\n else:\n return None",
"def add_style_to_styles(self, style_key: str, style_value: str):\r\n self._styles[style_key] = style_value",
"def drink_style_input():\n # Define answered style questions dictionary\n answers_style = {} \n # Loop through style questions\n for style, question in questions.iteritems():\n # Ask whether they like a drink style and set to lower case\n # Test if answer is yes, then add drink style and boolean to dictionary\n answers_style[style] = raw_input(questions[style] + \" Please answer (y)es or (n)o.\").lower() in [\"y\",\"yes\"]\n return answers_style",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_colour='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.2cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='', slider_width='4cm')\n format_box(self.glyph_options_box, box_style='',\n border_visible=False, border_colour='',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.1cm', margin=0)\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_colour=map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='',\n slider_width='4cm')\n format_box(self.glyph_options_box, box_style=style,\n border_visible=True,\n border_colour=map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.1cm', margin=0)\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def loadstyle(style_name):\n\n style = {}\n nwc_styles = {} # for backwards compatibility\n style_file = os.path.join(HERE, '..', 'rc', style_name)\n try:\n # Check rc directory for built in styles first\n rc_file(style_file)\n except FileNotFoundError:\n # Check current working dir or path\n style_file = style_name\n try:\n rc_file(style_file)\n except FileNotFoundError as err:\n raise StyleNotFoundError(f\"No such style file found: {err}\")\n style = rcParams.copy()\n\n # The style files may also contain an extra section with typography\n # for titles and captions (these can only be separately styled in code,\n # as of Matplotlib 2.2)\n # This is a hack, but it's nice to have all styling in one file\n # The extra styling is prefixed with `#!`\n with open(style_file, 'r') as file_:\n doc = file_.readlines()\n rc_params_newsworthy = \"\\n\".join([d[2:]\n for d in doc if d.startswith(\"#!\")])\n rc_params_newsworthy = yaml.safe_load(rc_params_newsworthy)\n ###\n # Typography\n ###\n if \"title_font\" in rc_params_newsworthy:\n nwc_styles[\"title_font\"] = [\n x.strip() for x in rc_params_newsworthy[\"title_font\"].split(\",\")\n ]\n else:\n nwc_styles[\"title_font\"] = style[\"font.family\"]\n\n # define as pt or reltive (\"smaller\")\n nwc_styles[\"subtitle.fontsize\"] = rc_params_newsworthy.get(\n \"subtitle.fontsize\",\n None,\n )\n\n # make annotation same font size as ticks by default\n tick_font_size = style.get('xtick.labelsize', \"smaller\")\n nwc_styles[\"annotation.fontsize\"] = rc_params_newsworthy.get(\n \"annotation.fontsize\",\n tick_font_size,\n )\n nwc_styles[\"note.fontsize\"] = rc_params_newsworthy.get(\n \"note.fontsize\",\n \"smaller\",\n )\n nwc_styles[\"caption.fontsize\"] = rc_params_newsworthy.get(\n \"caption.fontsize\",\n \"smaller\",\n )\n\n color = rc_params_newsworthy.get(\"neutral_color\",\n rcParams[\"figure.edgecolor\"])\n black_color = rc_params_newsworthy.get(\"black_color\", BLACK)\n dark_gray_color = rc_params_newsworthy.get(\"dark_gray_color\", DARK_GRAY)\n light_gray_color = rc_params_newsworthy.get(\"light_gray_color\", LIGHT_GRAY)\n strong_color = rc_params_newsworthy.get(\"strong_color\", color)\n positive_color = rc_params_newsworthy.get(\"positive_color\", POSITIVE)\n negative_color = rc_params_newsworthy.get(\"negative_color\", NEGATIVE)\n warm_color = rc_params_newsworthy.get(\"warm_color\", WARM)\n cold_color = rc_params_newsworthy.get(\"cold_color\", COLD)\n fill_between_color = rc_params_newsworthy.get(\"fill_between_color\", FILL_BETWEEN)\n fill_between_alpha = rc_params_newsworthy.get(\"fill_between_alpha\", 0.5)\n nwc_styles[\"black_color\"] = to_rgba(\"#\" + str(black_color), 1)\n nwc_styles[\"dark_gray_color\"] = to_rgba(\"#\" + str(dark_gray_color), 1)\n nwc_styles[\"light_gray_color\"] = to_rgba(\"#\" + str(light_gray_color), 1)\n nwc_styles[\"neutral_color\"] = to_rgba(\"#\" + str(color), 1)\n nwc_styles[\"strong_color\"] = to_rgba(\"#\" + str(strong_color), 1)\n nwc_styles[\"positive_color\"] = to_rgba(\"#\" + positive_color, 1)\n nwc_styles[\"negative_color\"] = to_rgba(\"#\" + negative_color, 1)\n nwc_styles[\"warm_color\"] = to_rgba(\"#\" + warm_color, 1)\n nwc_styles[\"cold_color\"] = to_rgba(\"#\" + cold_color, 1)\n nwc_styles[\"fill_between_color\"] = to_rgba(\"#\" + str(fill_between_color), 1)\n nwc_styles[\"fill_between_alpha\"] = float(fill_between_alpha)\n\n if \"qualitative_colors\" in rc_params_newsworthy:\n nwc_styles[\"qualitative_colors\"] = [\n to_rgba(\"#\" + c.strip(), 1)\n for c in 
rc_params_newsworthy[\"qualitative_colors\"].split(\",\")\n ]\n\n else:\n nwc_styles[\"qualitative_colors\"] = [to_rgba(\"#\" + c, 1) for c in QUALITATIVE]\n if \"logo\" in rc_params_newsworthy:\n nwc_styles[\"logo\"] = rc_params_newsworthy[\"logo\"]\n\n return style, nwc_styles",
"def SetStyle(self, start=None, end=None, style=None):\n # Global default styles for all languages\n self.StyleSetSpec(stc.STC_STYLE_DEFAULT, \"face:%(helv)s,size:%(size)d\" % faces)\n self.StyleClearAll() # Reset all to be like the default\n\n # Global default styles for all languages\n self.StyleSetSpec(stc.STC_STYLE_DEFAULT, \"face:%(helv)s,size:%(size)d\" % faces)\n self.StyleSetSpec(stc.STC_STYLE_LINENUMBER, \"back:#C0C0C0,face:%(helv)s,size:%(size2)d\" % faces)\n self.StyleSetSpec(stc.STC_STYLE_CONTROLCHAR, \"face:%(other)s\" % faces)\n self.StyleSetSpec(stc.STC_STYLE_BRACELIGHT, \"fore:#FFFFFF,back:#0000FF,bold\")\n self.StyleSetSpec(stc.STC_STYLE_BRACEBAD, \"fore:#000000,back:#FF0000,bold\")\n\n # Python styles\n # Default\n self.StyleSetSpec(stc.STC_P_DEFAULT, \"fore:#000000,face:%(helv)s,size:%(size)d\" % faces)\n # Comments\n self.StyleSetSpec(stc.STC_P_COMMENTLINE, \"fore:#007F00,face:%(other)s,size:%(size)d\" % faces)\n # Number\n self.StyleSetSpec(stc.STC_P_NUMBER, \"fore:#007F7F,size:%(size)d\" % faces)\n # String\n self.StyleSetSpec(stc.STC_P_STRING, \"fore:#7F007F\")\n # Single quoted string\n self.StyleSetSpec(stc.STC_P_CHARACTER, \"fore:#7F007F,face:%(helv)s,size:%(size)d\" % faces)\n # Keyword\n self.StyleSetSpec(stc.STC_P_WORD, \"fore:#00007F,bold,size:%(size)d\" % faces)\n # Triple quotes\n self.StyleSetSpec(stc.STC_P_TRIPLE, \"fore:#7F0000,size:%(size)d\" % faces)\n # Triple double quotes\n self.StyleSetSpec(stc.STC_P_TRIPLEDOUBLE, \"fore:#7F0000,size:%(size)d\" % faces)\n # Class name definition\n self.StyleSetSpec(stc.STC_P_CLASSNAME, \"fore:#0000FF,bold,size:%(size)d\" % faces)\n # Function or method name definition\n self.StyleSetSpec(stc.STC_P_DEFNAME, \"fore:#007F7F,bold,size:%(size)d\" % faces)\n # Operators\n self.StyleSetSpec(stc.STC_P_OPERATOR, \"bold,size:%(size)d\" % faces)\n # Identifiers\n self.StyleSetSpec(stc.STC_P_IDENTIFIER, \"fore:#000000,face:%(helv)s,size:%(size)d\" % faces)\n # Comment-blocks\n self.StyleSetSpec(stc.STC_P_COMMENTBLOCK, \"fore:#7F7F7F,size:%(size)d\" % faces)\n # End of line where string is not closed\n self.StyleSetSpec(stc.STC_P_STRINGEOL, \"fore:#000000,face:%(mono)s,back:#E0C0E0,eol,size:%(size)d\" % faces)\n\n self.SetCaretForeground(\"BLUE\")",
"def switch_style(self):\n self.game_data.set_style(next_style[self.game_data.get_style()])\n self.settings_buttons[0].set_text(get_style_name(self.game_data.get_style()))",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=True, border_colour='black',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n self.save_button.button_style = ''\n self.save_button.font_weight = 'normal'\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_colour= map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n self.save_button.button_style = 'primary'\n self.save_button.font_weight = 'bold'\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def setupStyling(self):\n\n\t\tfaces = {\n\t\t\t'times': 'Times New Roman',\n\t\t\t'mono' : 'Courier New',\n\t\t\t'helv' : 'Arial',\n\t\t\t'other': 'Comic Sans MS',\n\t\t\t'size' : 10,\n\t\t\t'size2': 8,\n\t\t}\n\n\t\tself.edit.StyleSetSpec(stc.STC_STYLE_DEFAULT, \"back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleClearAll()\n\t\tself.edit.StyleSetSpec(stc.STC_STYLE_LINENUMBER, \"fore:#928374,back:#212121,face:%(mono)s,size:%(size2)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.TEXT, \"fore:#d5c4a1,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HEADING, \"fore:#EFCD1E,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HIDDEN, \"fore:#d5c4a1,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODE, \"fore:#b8bb26,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.SYMBOL, \"fore:#81ac71,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.TEST, \"fore:#ff00ff,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.STRIKE, \"fore:#e44533,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.BOLD, \"fore:#d9a62e,bold,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.UNDERLINE, \"fore:#d9a62e,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.ITALIC, \"fore:#7d9d90,italic,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.IMAGE, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.IMAGE_UNDERLINED, \"fore:#cb8296,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.LINK, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.LINK_UNDERLINED, \"fore:#cb8296,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HTML, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HTML_ATTRIBUTE, \"fore:#d9a62e,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.FORMAT, \"fore:#e44533,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.COMMENT, \"fore:#928372,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_KEYWORD, \"fore:#569cd6,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_SYMBOL, \"fore:#9cdcfe,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_TEXT, \"fore:#F9FFE0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_STRING, \"fore:#d69d73,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_COMMENT, \"fore:#57a64a,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_FUNCTION, \"fore:#4ec9b0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_CLASS, \"fore:#4ec9b0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_TYPE, \"fore:#EFCD1E,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_FLOW, \"fore:#d8a0df,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_DIGIT, \"fore:#b5ce92,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.IndicatorSetStyle(0, stc.STC_INDIC_SQUIGGLE)\n\t\tself.edit.IndicatorSetForeground(0, 
wx.RED)",
"def update_known_styles_state(app: sphinx.application.Sphinx) -> None:\n global _KNOWN_STYLES_IN_USE\n\n _KNOWN_STYLES_IN_USE = {\n \"light\": _get_light_style(app),\n \"dark\": _get_dark_style(app),\n }",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_color='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.2cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='', slider_width='5cm',\n slider_colour='')\n _format_box(self.glyph_options_box, box_style='',\n border_visible=False, border_color='',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.1cm', margin=0)\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color=_map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='',\n slider_width='5cm',\n slider_colour=_map_styles_to_hex_colours(style))\n _format_box(self.glyph_options_box, box_style=style,\n border_visible=True,\n border_color=_map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.1cm', margin=0)\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=True, border_color='black',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n self.save_button.button_style = ''\n self.save_button.font_weight = 'normal'\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color= _map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n self.save_button.button_style = 'primary'\n self.save_button.font_weight = 'bold'\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_colour='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.1cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_colour=map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.1cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=True, border_colour='black',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_colour= map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def on_add_clicked(self, obj):\n style = self.sheetlist.get_style_sheet(\"default\")\n StyleEditor(_(\"New Style\"), style, self)",
"def style(self, style):\n if style is None:\n raise ValueError(\"Invalid value for `style`, must not be `None`\")\n allowed_values = [\"NotSet\", \"General\", \"Academic\", \"Business\", \"Technical\", \"Creative\", \"Casual\", \"Web\"]\n if style not in allowed_values:\n raise ValueError(\n \"Invalid value for `style` ({0}), must be one of {1}\"\n .format(style, allowed_values)\n )\n\n self._style = style",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_colour='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.2cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='', labels_buttons_style='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_colour=map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='',\n labels_buttons_style='primary')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_color='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.1cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color=_map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.1cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_color='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.2cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='', slider_width='',\n slider_handle_colour=None, slider_bar_colour=None,\n buttons_style='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color=map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='',\n slider_width='',\n slider_handle_colour=map_styles_to_hex_colours(style),\n slider_bar_colour=None, buttons_style='primary')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_color='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.2cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='', labels_buttons_style='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color=_map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='',\n labels_buttons_style='primary')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=True, border_color='black',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color= _map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')",
"def prompt_style():\r\n font_numbers = {'0', '1', '2', '3', '4', '5', '6'}\r\n print(\"Background Color\")\r\n background_color = str.lower(input(\"Choose the name of a color, or in format '#XXXXXX':\\t\"))\r\n if len(background_color) != 7 or background_color[0] != '#':\r\n while background_color not in COLORS:\r\n print(\"Illegal format\")\r\n background_color = str.lower(input(\"Choose the color name or #XXXXXX\\t\"))\r\n if len(background_color) == 7 and background_color[0] == '#':\r\n break\r\n see_font = str.lower(input(\"Do you want to see what the fonts look like? [yes]\\t\"))\r\n if see_font == \"yes\" or see_font == \"\":\r\n print(\"Close the window when you have made your choice\")\r\n turtle_fonts()\r\n print(\"Choose a font by its number\",\r\n \"0: Arial, size 14\",\r\n \"1: Comic Sans MS, size 14\",\r\n \"2: Lucida Grande, size 14\",\r\n \"3: Tahoma, size 14\",\r\n \"4: Verdana, size 14\",\r\n \"5: Helvetica, size 14\",\r\n \"6: Times New Roman, size 14\", sep='\\n')\r\n font = input(\">> \")\r\n while font not in font_numbers:\r\n font = input(\"Invalid font number, enter from 0 - 6\\t\")\r\n if font == \"0\":\r\n font = \"Arial\"\r\n elif font == \"1\":\r\n font = \"Comic Sans MS\"\r\n elif font == \"2\":\r\n font = \"Lucida Grande\"\r\n elif font == \"3\":\r\n font = \"Tahoma\"\r\n elif font == \"4\":\r\n font = \"Verdana\"\r\n elif font == \"5\":\r\n font = \"Helvetica\"\r\n elif font == \"6\":\r\n font = \"Times New Roman\"\r\n print(\"Paragraph Text Color\")\r\n paragraph_color = str.lower(input(\"Choose the name of a color, or in format '#XXXXXX':\\t\"))\r\n if len(paragraph_color) != 7 or paragraph_color[0] != '#':\r\n while paragraph_color not in COLORS:\r\n print(\"Illegal format\")\r\n paragraph_color = str.lower(input(\"Choose the color name or #XXXXXX\\t\"))\r\n if len(paragraph_color) == 7 and paragraph_color[0] == '#':\r\n break\r\n print(\"Heading Color\")\r\n head_color = str.lower(input(\"Choose the name of a color, or in format '#XXXXXX':\\t\"))\r\n if len(head_color) != 7 or head_color[0] != '#':\r\n while head_color not in COLORS:\r\n print(\"Illegal format\")\r\n head_color = str.lower(input(\"Choose the color name or #XXXXXX\\t\"))\r\n if len(head_color) == 7 and head_color[0] == '#':\r\n break\r\n return background_color, font, paragraph_color, head_color",
"def stylizer(self, str=unicode(\"\"), style_name=unicode(\"\")):\n sret = str\n\n try:\n\n if self.style_fontstyle[style_name] == \"italic\":\n sret = unicode(\"<i>%s</i>\" % sret)\n\n finally:\n\n try:\n\n if self.style_fontweight[style_name] == \"bold\":\n sret = unicode(\"<b>%s</b>\" % sret)\n\n finally:\n\n try:\n\n if self.style_textunderline[style_name] == \"underlined\":\n sret = unicode('<span style=\"text-decoration: underline;\">%s</span>' % sret)\n\n finally:\n return sret",
"def set_fighting_style(self, style: int):\n self.dna[11] = style",
"def update_style(self):\n pass",
"def set_style(self, style):\n if isinstance(style, string_types):\n style = get_style_by_name(style)\n self._style = style\n self._clear_caches()",
"def styles(**rules):\n if not isinstance(rules, dict):\n raise TypeError(\"Style must be a dict\")\n\n return \";\".join(\n \"%s:%s\" % (k.replace(\"_\", \"-\"), _parse_style_value(v))\n for (k, v) in rules.items()\n )\n\n return _parse_style_value(v)"
]
| [
"0.6187341",
"0.59183043",
"0.5915758",
"0.5880331",
"0.5701508",
"0.5542363",
"0.55048287",
"0.54951644",
"0.5477289",
"0.54621917",
"0.5402817",
"0.53932166",
"0.53902596",
"0.5368642",
"0.53416634",
"0.53233325",
"0.5313398",
"0.53032255",
"0.5299673",
"0.5295995",
"0.5259662",
"0.5243682",
"0.52393407",
"0.5236437",
"0.52346337",
"0.5226476",
"0.52110445",
"0.5202661",
"0.5150116",
"0.51481885"
]
| 0.66815335 | 0 |
Returns the first sunday of the given month of the given year. >>> GetFirstSundayOfMonth(2016, 2) 7 >>> GetFirstSundayOfMonth(2016, 3) 6 >>> GetFirstSundayOfMonth(2000, 1) 2 | def GetFirstSundayOfMonth(year, month):
weeks = calendar.Calendar().monthdays2calendar(year, month)
# Return the first day in the first week that is a Sunday.
return [date_day[0] for date_day in weeks[0] if date_day[1] == 6][0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first_day_of_month(date):\n return date.replace(day=1)",
"def first_day_of_month(date):\n return date.replace(day=1)",
"def countSundaysFirstOfMonth(startYear, endYear):\n\tdayOfWeek = 1\n\tnumSundays = 0\n\tfor year in xrange(1900, endYear + 1):\n\t\tfor month in xrange(1, 13):\n\t\t\tif year >= startYear and dayOfWeek == 0:\n\t\t\t\tnumSundays += 1\n\t\t\tdayOfWeek += numDays(month, year)\n\t\t\tdayOfWeek %= 7\n\treturn numSundays",
"def first_day_of_year(year):\n year -= 1\n return (year + (year // 4) - (year // 100) + (year // 400) + 1) % NUM_DAYS_IN_WEEK",
"def first_month_day():\r\n return datetime.now().replace(day=1).strftime('%d-%m-%Y')",
"def locale_first_weekday():\n\tfirst_weekday = 6 #by default settle on monday\n\n\ttry:\n\t\tprocess = os.popen(\"locale first_weekday week-1stday\")\n\t\tweek_offset, week_start = process.read().split('\\n')[:2]\n\t\tprocess.close()\n\t\tweek_start = datetime.date(*time.strptime(week_start, \"%Y%m%d\")[:3])\n\t\tweek_offset = datetime.timedelta(int(week_offset) - 1)\n\t\tbeginning = week_start + week_offset\n\t\tfirst_weekday = int(beginning.strftime(\"%w\"))\n\texcept:\n\t\tprint \"WARNING - Failed to get first weekday from locale\"\n\n\treturn first_weekday",
"def _FirstSunday(self, dtz): # pylint: disable-msg=C0103,R0201\n return dtz + datetime.timedelta(days=(6-dtz.weekday()))",
"def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]",
"def first_monday_of_week(year, week):\n weekyear = \"{} {} 1\".format(year, week)\n return time.asctime(time.strptime(weekyear, \"%Y %U %w\"))",
"def first_day_of_month():\n first_object = datetime.utcnow()\n first_string = first_object.strftime('%m/01/%Y')\n return first_string",
"def get_first_date(in_month=1):\n\n from_date = (today-relativedelta(months=in_month)).replace(day=1)\n \n return from_date",
"def week_of_month(dt):\n\n first_day = dt.replace(day=1)\n\n dom = dt.day\n adjusted_dom = dom + first_day.weekday()\n\n return int(ceil(adjusted_dom/7.0))",
"def first_week_day(self) -> int:\n return self._data['week_data']['first_day']",
"def get_mothers_day_date(year):\r\n start_date = parse(f\"Jan {year}\").date()\r\n for date in rrule(YEARLY, dtstart=start_date, bymonth=5, byweekday=SU, bysetpos=2):\r\n if date.year == year:\r\n return date.date()",
"def is_first_day_of_month(timestamps):\n return extract_day_of_month(timestamps) == 1",
"def get_mothers_day_date(year):\n day = date(year=year, month=5, day=1)\n while 1:\n if day.weekday() == 6:\n day += timedelta(days=7)\n break\n day += timedelta(days=1)\n return day",
"def get_start_date(year, month):\n start_date = date(year, month, 1).strftime(\"%Y-%m-%d\")\n return start_date",
"def week_start_on_monday(weekday):\n return (weekday - 1 + 6) % 7 + 1",
"def week_of_month(dt):\n try:\n first_day = dt.replace(day=1)\n dom = dt.day\n if first_day.weekday() == 6:\n adjusted_dom = dom + day_of_week(dt) - 1\n else:\n adjusted_dom = dom + day_of_week(dt)\n return int(ceil(adjusted_dom/7.0))\n except Exception as e:\n log.exception(\"1;EME;FAILURE;700; FUNCTION ERROR \" + str(e), exc_info=False)\n sys.exit(0)",
"def MayDay(year):\n\n day = datetime.date(year, 5, 1)\n count = 0\n while True:\n if day.weekday() == 0:\n count += 1\n if count == 1:\n return day\n day += datetime.timedelta(days=1)",
"def start_month(d):\n return date(d.year, d.month, 1)",
"def last_month_first_day():\r\n return (datetime.now().replace(day=1) + relativedelta(months=-1) + timedelta(days=-1)).strftime(\r\n '%d-%m-%Y')",
"def DayOfWeek(year, month, day):\n num = year * 365\n num = num + year // 4 + 1\n num = num - (year // 100 + 1)\n num = num + year // 400 + 1\n if month < 3 and LeapYear(year):\n num = num - 1\n return (num + MONTH_OFFSETS[month - 1] + day + 4) % 7 + 1",
"def _get_next_monday(self):\n today = datetime.date.today()\n weekday_int = today.weekday()\n if weekday_int == 0:\n return today\n next_mon = today + timedelta(7 - weekday_int)\n return next_mon",
"def first_day_of_week(self):\n return self.__first_day_of_week",
"def next_sunday(day):\n if day.weekday() == 6: # sunday\n return day + timedelta(days=7)\n else:\n return day + timedelta(days=(6 - day.weekday()))",
"def get_month_start(x: Optional[Date] = None) -> Date:\n return (x or get_today()).replace(day=1)",
"def day_of_week(day, month, year):\n bias = (14 - month) // 12\n m_year = year - bias\n mth = month + 12 * bias - 2\n return (day + m_year + m_year // 4 - m_year // 100 + m_year // 400 + (31 * mth) // 12) % 7",
"def date_week_of_year(date, *, sunday_is_first_day_of_week: bool = False):\n if sunday_is_first_day_of_week:\n return date.strftime(\"%U\")\n else:\n return date.strftime(\"%V\")",
"def Day_of_week(day, month, year):\r\n if year % 4 == 0 and (year % 400 == 0 or year % 100 != 0):\r\n doomsday = [11, 29, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n else:\r\n doomsday = [10, 28, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n exact_day = ((day - doomsday[month-1]) + Dooms_day(year)) % 7\r\n character_day = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \r\n \"Friday\", \"Saturday\"]\r\n return character_day[exact_day]"
]
| [
"0.64842963",
"0.64842963",
"0.6202308",
"0.6187605",
"0.6112335",
"0.6027973",
"0.60278654",
"0.6023816",
"0.5930069",
"0.5766985",
"0.5711467",
"0.5659422",
"0.56270677",
"0.5547177",
"0.55425495",
"0.5527288",
"0.5471577",
"0.54612195",
"0.5457487",
"0.54439706",
"0.5405907",
"0.53813416",
"0.538074",
"0.5295434",
"0.52779603",
"0.52779114",
"0.52242523",
"0.51497144",
"0.51216155",
"0.5040509"
]
| 0.85827523 | 0 |
Gets the approximate build date given the specific build type. >>> GetBuildDate('default', datetime.datetime(2016, 2, 6, 1, 2, 3)) | def GetBuildDate(build_type, utc_now):
day = utc_now.day
month = utc_now.month
year = utc_now.year
if build_type != 'official':
first_sunday = GetFirstSundayOfMonth(year, month)
# If our build is after the first Sunday, we've already refreshed our build
# cache on a quiet day, so just use that day.
# Otherwise, take the first Sunday of the previous month.
if day >= first_sunday:
day = first_sunday
else:
month -= 1
if month == 0:
month = 12
year -= 1
day = GetFirstSundayOfMonth(year, month)
now = datetime.datetime(
year, month, day, utc_now.hour, utc_now.minute, utc_now.second)
return '{:%b %d %Y %H:%M:%S}'.format(now) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetBuildDate(build_filename):\n try:\n with open(build_filename) as f:\n return float(f.readline())\n except (IOError, ValueError):\n return 0.0",
"def build_date(self) -> str:\n data = \"none yet\"\n if self.STARTED:\n data = self.about.get(\"Build Date\", \"UNKNOWN\")\n return data",
"def getApplicationBuildDate(self) -> unicode:\n ...",
"def determine_project_date(self):\n\n if self.params[\"hosted on comic\"]:\n\n if self.params[\"workshop date\"]:\n date = self.to_datetime(self.params[\"workshop date\"])\n else:\n date = \"\"\n else:\n datestr = self.params[\"workshop date\"]\n # this happens when excel says its a number. I dont want to force the\n # excel file to be clean, so deal with it here.\n if type(datestr) == float:\n datestr = str(datestr)[0:8]\n\n try:\n date = timezone.make_aware(datetime.datetime.strptime(datestr,\"%Y%m%d\"),\n timezone.get_default_timezone())\n except ValueError as e:\n logger.warn(\"could not parse date '%s' from xls line starting with '%s'. Returning default date 2013-01-01\" %(datestr,self.params[\"abreviation\"]))\n date = \"\"\n\n\n if date == \"\":\n # If you cannot find the exact date for a project,\n # use date created\n if self.params[\"hosted on comic\"]:\n return self.params[\"created at\"]\n # If you cannot find the exact date, try to get at least the year right.\n # again do not throw errors, excel can be dirty\n\n year = int(self.params[\"year\"])\n\n try:\n date = timezone.make_aware(datetime.datetime(year,1,1),\n timezone.get_default_timezone())\n except ValueError:\n logger.warn(\"could not parse year '%f' from xls line starting with '%s'. Returning default date 2013-01-01\" %(year,self.params[\"abreviation\"]))\n date = timezone.make_aware(datetime.datetime(2013,1,1),\n timezone.get_default_timezone())\n\n return date",
"def get_build_timestamp(jenkins_url, job_name, build_nr):\n timestamp = execute_command(\n f\"wget -qO- {jenkins_url}/{job_name}/{build_nr}\"\n )\n return datetime.fromtimestamp(timestamp/1000)",
"def build_time(self):\n return self.nodes[0].get('infos').get('system_info').get('build_time')",
"def test_get_build_timestamp(self):\n pass",
"def get_date(self):\n return datetime.date(\n int(self.kwargs['year']),\n int(self.kwargs['month']),\n int(self.kwargs['day'])\n )",
"def get_date(date):\n return date",
"def build_type(self) -> Optional[pulumi.Input['BuildTypeArgs']]:\n return pulumi.get(self, \"build_type\")",
"def _GetLastOfficialBuildRevision():\n # First make sure the builder doesn't have any pending builds and is idle.\n builders = _QueryWaterfall('/builders')\n if builders[_SYZYGY_OFFICIAL]['pendingBuilds'] > 0:\n raise RuntimeError('There are pending official builds.')\n if builders[_SYZYGY_OFFICIAL]['state'] != 'idle':\n raise RuntimeError('An official build is in progress.')\n\n # Get the information from the last build and make sure it passed before\n # extracting the revision number.\n build = _QueryWaterfall('/builders/%s/builds/-1' %\n urllib.quote(_SYZYGY_OFFICIAL))\n if 'successful' not in build['text']:\n raise RuntimeError('Last official build failed.')\n return int(build['sourceStamp']['revision'])",
"def get_date():\n now = datetime.now()\n date = now.strftime(\"%Y%m%d\")\n return date",
"def get_release_date ():\n fname = os.path.join(\"doc\", \"changelog.txt\")\n release_date = \"unknown\"\n with open(fname) as fd:\n # the release date is on the first line\n line = fd.readline()\n mo = release_ro.search(line)\n if mo:\n release_date = mo.groups(1)\n return release_date",
"def get_install_date(gear_name, gear_dict):\n date = 'unknown'\n if gear_name in gear_dict.keys():\n date = gear_dict[gear_name].created\n date = '{day}/{month}/{year}'.format(day=date.day, month=date.month, year=date.year)\n\n return(date)",
"def get_build(self):\n return self.bot_data_file[\"build\"]",
"def _get_date():\n return datetime.datetime.now()",
"def build_type(self) -> pulumi.Output['outputs.BuildTypeResponse']:\n return pulumi.get(self, \"build_type\")",
"def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot parse build number as integer')",
"def compile_date(self):\n result = self._dll.JLINKARM_GetCompileDateTime()\n return ctypes.cast(result, ctypes.c_char_p).value.decode()",
"def get_date():\n\n return tz.get_brisbane_time().date()",
"def get_date():\n\n return datetime.datetime.utcnow().isoformat()",
"def getdate():\r\n import datetime\r\n return datetime.datetime.now()",
"def get_date():\n return datetime.datetime.now()",
"def test_get_build_number(self):\n pass",
"def get_build(self, build_id):\n pass",
"def infer_release_date(tagname: str) -> Optional[datetime]:\n if tagname in RELEASE_DATES:\n return RELEASE_DATES[tagname]\n elif tagname[0] == \"w\" and tagname < \"w_2020_43\":\n # Weeklies used to be reliably produced on Saturdays, but that changed\n # in October of 2020.\n return datetime.strptime(tagname + \"_6\", \"w_%G_%V_%u\")\n else:\n return None",
"def get_date():\n return datetime(2000, 1, 1, 0, 0, 0, FLOOD_TIMEOUT+1)",
"def compute_date(date_text):\n dt = None\n if date_text and len(date_text) == 8:\n try:\n dt = datetime.datetime.strptime(date_text, '%m%d%Y')\n except ValueError:\n pass\n return dt",
"def get_build_type(self):\n build_type_exports = self.export.findall('build_type')\n if len(build_type_exports) == 1:\n return build_type_exports[0].text\n raise InvalidPackage('Only one <build_type> element is permitted.')",
"def build_number(self):\n return self.get_data(\"build_number\")"
]
| [
"0.68794686",
"0.6811394",
"0.6633875",
"0.6123327",
"0.6031452",
"0.59449786",
"0.5787215",
"0.5717675",
"0.5698179",
"0.5636052",
"0.562484",
"0.55658007",
"0.5552328",
"0.55515826",
"0.5527472",
"0.5517274",
"0.55112916",
"0.54897165",
"0.5476035",
"0.54652506",
"0.54183716",
"0.54135567",
"0.5389085",
"0.53807545",
"0.5363876",
"0.53443545",
"0.53409225",
"0.53386974",
"0.53316575",
"0.5289445"
]
| 0.7833104 | 0 |
List current checks on given repo ref. | async def list(app: AppIdentity, repo: str, ref: str):
repo = RepoName.parse(repo)
async with aiohttp.ClientSession(
headers=await app.installation_headers(repo.owner)) as sesh:
fetch = checks.GetRuns(owner=repo.owner, repo=repo.repo, ref=ref)
print(await fetch.execute(sesh)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_refs(config, args):\n for item in lib.input_json_lines():\n yield config.repo.ref(item)",
"def git_status():\n\tl = []\n\tdebug(\"Not implemented\",1)\n\n\treturn l",
"def list_refs(self):\n pass",
"def checklists(self):\n return self.pods.all().checklists",
"def check_build_status(owner, repository, ref):\n return get_hvcs().check_build_status(owner, repository, ref)",
"def checks(self):\r\n return checks.Checks(self)",
"def _get_check_run_results(\n self, commits: List[dict]) -> List[str]:\n failed_status = {'failure', 'cancelled', 'timed_out', 'action_required'}\n check_run_results = []\n for commit in commits:\n commit_ref = commit['sha']\n commit_check_run_results = get_commit_check_runs(\n self._repo_name, commit_ref, self._auth)\n if not commit_check_run_results:\n continue\n num_check_runs = commit_check_run_results['total_count']\n if num_check_runs == 0:\n check_run_results.append('none')\n continue\n status = 'passed'\n for commit_check_run_result in commit_check_run_results[\n 'check_runs']:\n conclusion = commit_check_run_result['conclusion']\n if conclusion in failed_status:\n status = 'failed'\n break\n check_run_results.append(status)\n return check_run_results",
"def list_refs(self):\n print('----\\nREFs\\n----')\n self._print_dict(self.refs)",
"def checklists(self):\r\n return Checklists(self)",
"def get_items_changed(self, base_ref='HEAD'):\n command = ['diff-index', '--name-only',\n '--cached', base_ref]\n res = self.run(command)\n items = res.split('\\n') if res else []\n return items",
"def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)",
"def refs_to(cls, sha1, repo):\n matching = []\n for refname in repo.listall_references():\n symref = repo.lookup_reference(refname)\n dref = symref.resolve()\n oid = dref.target\n commit = repo.get(oid)\n if commit.hex == sha1:\n matching.append(symref.shorthand)\n\n return matching",
"def check_for_list(check):",
"def cmd_get_sha(ref):\n return ['git', 'rev-parse', ref]",
"def get_filenames_in_commit(git_reference: str = \"\"):\n c = cmd.run(f\"git show --name-only --pretty=format: {git_reference}\")\n if c.return_code == 0:\n return c.out.strip().split(\"\\n\")\n else:\n raise GitCommandError(c.err)",
"def remove_all_status_checks_on_pr_branch(org: Organization, repo: Repository,\n branches: Dict[str, Branch]) -> List[Change[str]]:\n def execute_remove_all_status_checks(change: Change[str], branch: Branch, existing_checks: Set[str]) -> Change[str]:\n print_debug(\"Removing all status checks from branch %s\" % highlight(branch.name))\n try:\n if existing_checks:\n branch.remove_required_status_checks()\n except GithubException as e:\n print_error(str(e))\n return change.failure()\n else:\n return change.success()\n\n prb = get_pr_branch(repo, branches)\n if not prb:\n return []\n\n try:\n rqs = prb.get_required_status_checks()\n except GithubException:\n # the repository has currently no status checks\n pass\n else:\n if len(rqs.contexts) > 0:\n existing_checks = set(rqs.contexts) # type: Set[str]\n return [Change(\n meta=ChangeMetadata(\n executor=execute_remove_all_status_checks,\n params=[prb, existing_checks]\n ),\n action=ChangeActions.REPLACE,\n before=\"%s checks\" % len(existing_checks),\n after=None,\n )]\n return []",
"def git_status(c):\n c.run(\"git submodule foreach git status\")",
"def _do_list(self, line: str) -> None:\n for_push = \"for-push\" in line\n refs = self.get_refs(for_push=for_push)\n for sha, ref in refs:\n _write(\"%s %s\" % (sha, ref))\n if not for_push:\n head = self.read_symbolic_ref(\"HEAD\")\n if head:\n _write(\"@%s HEAD\" % head[1])\n else:\n self._trace(\"no default branch on remote\", Level.INFO)\n _write()",
"def __gitBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False)",
"def refs(self):\n p = Popen(['git', 'show-ref', '--no-head'], cwd=self.path, stdout=PIPE)\n for line in p.stdout:\n commit_id, refname = line.split()\n yield (CommitId(commit_id), refname)",
"def get_branches(self, *, refs=[\"refs/heads\", \"refs/remotes\"]):\n # type: (Sequence[str]) -> List[Branch]\n stdout = self.git(\n \"for-each-ref\",\n (\n \"--format=\"\n \"%(HEAD)%00\"\n \"%(refname)%00\"\n \"%(upstream)%00\"\n \"%(upstream:remotename)%00\"\n \"%(upstream:track,nobracket)%00\"\n \"%(committerdate:unix)%00\"\n \"%(objectname)%00\"\n \"%(contents:subject)\"\n ),\n *refs\n ) # type: str\n branches = [\n branch\n for branch in (\n self._parse_branch_line(line)\n for line in filter_(stdout.splitlines())\n )\n if branch.name != \"HEAD\"\n ]\n store.update_state(self.repo_path, {\"branches\": branches})\n return branches",
"def buildList(self,list_all=False,push_all=False):\n print \"=== Loading\"\n\n ## Walk in repo and get GitRepo\n for repo in self.repos:\n if repo not in self.ignore or list_all:\n a = GitRepo(repo)\n if not self.animate:\n a.branchStatus()\n if a.globalStatus():\n if a.forward and push_all:\n a.push()\n self.gitrepos.append(a)\n\n print \"\\r=== \"+str(len(self.gitrepos))+\" repos scanned\"",
"def query_git():\n return subprocess.run(\n shlex.split('git status --porcelain=2 --branch'),\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL)",
"async def items(self):\n response = await self._api.get(\"/v1/agent/checks\")\n return response.body",
"def git_ls_files(*cmd_args):\n cmd = ['git', 'ls-files']\n cmd.extend(cmd_args)\n return set(subprocess.check_output(cmd).splitlines())",
"def list_(ctx: click.Context, repository_path):\n root_commands.cmd_list(ctx.obj, repository_path)",
"def get_result(self):\n check_result_list = []\n for check in self.monitoring_checks:\n try:\n result = check.execute()\n except ForbiddenCheckError as err:\n logger.error(err)\n else:\n check_result_list.append(result)\n if check_result_list:\n return check_result_list\n else:\n logger.error(\"Empty check result list\")",
"def test_v1_alert_ref_list_get(self):\n pass",
"def do_check(self, change):\n\n return []",
"def protect_pr_branch_with_tests_if_any_exist(org: Organization, repo: Repository,\n branches: Dict[str, Branch]) -> List[Change[str]]:\n def execute_test_protection(change: Change[str], branch: Branch, existing_checks: Set[str],\n known_status_checks: Set[str], known_checkruns: Set[str]) -> Change[str]:\n\n all_known_checks = known_status_checks | known_checkruns # For convenience later to treat them as a single set\n\n print_debug(\"[%s] Changing status checks on branch '%s' to [%s]\" %\n (highlight(repo.name), highlight(branch.name),\n highlight(\", \".join(list(all_known_checks)))))\n try:\n if existing_checks:\n branch.edit_required_status_checks(strict=True, contexts=list(all_known_checks))\n else:\n safe_branch_edit_protection(\n branch,\n strict=True,\n contexts=list(all_known_checks),\n )\n except GithubException as e:\n print_error(\"Can't edit required status checks on repo %s branch %s: %s\" %\n (repo.name, branch.name, str(e)))\n return change.failure()\n return change.success()\n\n prb = get_pr_branch(repo, branches)\n if not prb:\n return []\n\n existing_checks = set() # type: Set[str]\n try:\n rqs = prb.get_required_status_checks()\n except GithubException:\n # the repository has currently no status checks\n pass\n else:\n if len(rqs.contexts) > 0:\n # The repository already has some status checks\n existing_checks = set(rqs.contexts)\n print_debug(\"Branch %s on repo %s already has status checks [%s]\" %\n (highlight(prb.name), highlight(repo.name), highlight(\", \".join(existing_checks))))\n\n # the repository currently has no status checks, let's see if any came in within the last 7 days\n sevendaysago = datetime.now() - timedelta(days=7)\n commits = repo.get_commits(prb.name, since=sevendaysago)\n known_status_checks = set() # type: Set[str]\n known_checkruns = set() # type: Set[str]\n for commit in commits:\n for status in commit.get_statuses(): # type: CommitStatus\n if status.context not in known_status_checks:\n print_debug(\"New status check [%s]: %s %s '%s'\" %\n (commit.sha, status.updated_at,\n status.context, status.description))\n known_status_checks.add(status.context)\n for checkrun in commit.get_check_runs(): # type: CheckRun\n if checkrun.name not in known_checkruns:\n print_debug(\"New check run [%s]: %s %s %s\" %\n (commit.sha, checkrun.completed_at, checkrun.name, checkrun.app))\n known_checkruns.add(checkrun.name)\n\n all_known_checks = known_status_checks | known_checkruns # For convenience later to treat them as a single set\n print_debug(\"Found status checks [%s]\" % \", \".join(all_known_checks))\n\n if all_known_checks and all_known_checks != existing_checks:\n # add all known checks as required checks\n print_debug('Adding checks [%s] to branch %s on repo %s' %\n (highlight(\", \".join((all_known_checks) - existing_checks)),\n highlight(prb.name), highlight(repo.name)))\n return [Change(\n meta=ChangeMetadata(\n executor=execute_test_protection,\n params=[prb, existing_checks, known_status_checks, known_checkruns]\n ),\n action=ChangeActions.REPLACE if existing_checks else ChangeActions.ADD,\n before=\"%s checks\" % len(existing_checks) if existing_checks else \"No checks\",\n after=\"%s checks\" % len(all_known_checks),\n )]\n return []"
]
| [
"0.584414",
"0.58094585",
"0.57224625",
"0.5621346",
"0.5513199",
"0.54575586",
"0.5441844",
"0.5393468",
"0.53929275",
"0.5325349",
"0.5319219",
"0.5276341",
"0.5242121",
"0.5219414",
"0.52133495",
"0.52090275",
"0.52059555",
"0.5200033",
"0.5191004",
"0.5177645",
"0.5156163",
"0.51480716",
"0.51328963",
"0.51234275",
"0.5122967",
"0.5118989",
"0.5114291",
"0.5080533",
"0.5063071",
"0.5058904"
]
| 0.71473974 | 0 |
Returns a sorted list of stops, sorted by distance from the given point. | def get_stops_sorted( latitude, longitude ):
returnvalue = []
stops_file = open( 'google_transit/stops.txt' )
stops_iter = DictReader( stops_file )
for stop in stops_iter:
distance = angular_distance( latitude, longitude,
float( stop[ 'stop_lat' ] ), float( stop[ 'stop_lon' ]))
stop[ 'distance' ] = distance * MI
returnvalue.append(( distance, stop ))
stops_file.close( )
returnvalue.sort( )
return [ y for x,y in returnvalue ] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sortDistance(self, point = (-1, -1)):\n return FeatureSet(sorted(self, key = lambda f: f.distanceFrom(point)))",
"def sort(points):\n if len(points) == 0:\n return []\n \n starting_vertex = min(points)\n reference_point = starting_vertex + Point2D(0, 1)\n \n return sorted(points, key=partial(\n get_angle_and_distance, point_2=starting_vertex, point_3=reference_point\n ))",
"def sort_points(point, cloud):\n minsq = [distance_point_point_sqrd(p, point) for p in cloud]\n return sorted(zip(minsq, cloud, range(len(cloud))), key=lambda x: x[0])",
"def sort_points_by_Y(list_of_points):\n sorted_y = sorted(list_of_points, key= lambda pt: pt.getY())\n sorted_y.reverse()\n return sorted_y",
"def distances(points, l=2):\n distances = []\n while points:\n baseline = points.pop()\n distances.extend([distance(baseline, point, l) for point in points])\n return distances",
"def optimized_travelling_salesman(points, start=None):\n if start is None:\n start = points[0]\n must_visit = points\n path = [start]\n must_visit.remove(start)\n while must_visit:\n nearest = min(must_visit, key=lambda x: distance(path[-1], x))\n path.append(nearest)\n must_visit.remove(nearest)\n return path",
"def closest_stations(lat: float, lon: float, limit: int = 1) -> List[Dict]:\n dist_sorted = sorted(\n STATIONS, key=lambda s: distance((lat, lon), (s[\"lat\"], s[\"lon\"]))\n )\n return dist_sorted[:limit]",
"def distance_user_point_to_polygons(user_point, polygon_list):\n list_polygons_distances = []\n\n for polygon in polygon_list:\n dist = user_point.distance(polygon)\n list_polygons_distances.append(dist)\n\n #return sorted(list_polygons_distances, key=lambda x: x[1], reverse=True)\n return list_polygons_distances",
"def optimal_points(segments):\n points = []\n segments.sort(key=lambda x: x.end)\n\n while len(segments) != 0:\n s = segments[0]\n points.append(s.end)\n j = 0\n while j < len(segments):\n temp = segments[j]\n if temp.start <= s.end and temp.end >= s.end:\n segments.remove(temp)\n else:\n j += 1\n return points",
"def sort_by_dominance(spart_list):\n score = [\n sum(\n [\n 1 for x in spart_list if x >= y\n ]) for y in spart_list\n ]\n what_would_sort = numpy.argsort(score)\n sorted_sparts = [spart_list[x] for x in what_would_sort]\n return sorted_sparts",
"def get_compressed_neighbors(self, point: Point):\n distances: t.Dict[Point, int] = {point: 0}\n queue = collections.deque([point])\n\n while queue:\n cur_point = queue.popleft()\n distance = distances[cur_point]\n\n if cur_point != point and cur_point in self.points_of_interest():\n # If we've hit a point of interest, then we will yield it and treat the point as a dead end by skipping\n # over its neighbors.\n yield cur_point, distance\n continue\n\n for neighbor in self.neighbors(\n cur_point, collected_keys=set(self.keys.values())\n ):\n if neighbor not in distances:\n distances[neighbor] = distance + 1\n queue.append(neighbor)",
"def get_distance_to_place(points, place):\n distances = np.empty((len(points),), dtype=\"float64\")\n for i, point in tqdm(enumerate(points), desc=\"GETTING POINTS DISTANCES\"):\n p = Point(point[0], point[1])\n distances[i] = p.distance(place[\"geometry\"].iloc[0])\n return distances",
"def sorted_map_objects(self, point, objects):\n # print objects\n # print len(objects)\n if isinstance(objects, list):\n sorted_objects = objects\n else:\n sorted_objects = [j for i in objects for j in i]\n if len(sorted_objects) > 0:\n sorted_objects.sort(key=lambda x: x.distance(point.get_location()), reverse=False)\n return sorted_objects",
"def travelling_salesman(points, start=None):\n if start is None:\n start = points[0]\n return min([perm for perm in permutations(points) if perm[0] == start], key=total_distance)",
"def closest(point, points):\n pts = [(Point.distance(point, p), p) for p in points]\n pts.sort()\n return pts[0][1]",
"def sort_points(*pts):\n npts = len(pts)\n points = []\n angles = []\n #sort args by angle relative to x, c.c.w\n def _angle(v):\n # cartesian angle is always btwn 0 and 180\n angle = cartesian_angle(v,[1.,0.])\n if (v[1] < 0.):\n return 360. - angle\n else:\n return angle\n for v in pts:\n v = num.array(v[0:2])\n an = _angle(v)\n j = 0\n while j < npts -1:\n if j > len(points)-1: break\n if an < angles[j]: break\n else: j = j + 1\n points.insert(j,v)\n angles.insert(j,an)\n return (points,angles)",
"def to_sorted_points(x):\n return tuple(sorted(x))",
"def rdp_indexes(points, eps2, dist2=None):\n dist2 = point_line_dist2 if dist2 is not None else point_line_dist2\n\n N = len(points)\n keep = [0, N-1]\n\n stack = [(0, N-1)]\n\n for i in xrange(N**2):\n if not stack:\n return sorted(keep)\n\n i0, i1 = stack.pop()\n if i1 <= i0+1:\n continue\n\n d = dist2(points[i0+1:i1], points[i0], points[i1])\n i = np.argmax(d)\n dmax = d[i]\n i += i0 + 1\n\n if dmax > eps2:\n keep.append(i)\n stack += [(i0, i), (i, i1)]\n\n assert False",
"def get_cities_sorted_location(request):\n latitude, longitude = latlang(request)\n point = Point(float(longitude), float(latitude), srid=4326)\n locations = Location.objects.filter(point__distance_lte=(point, D(km=200))).annotate(distance=Distance(\"point\", point)).order_by(\"distance\")[:10]\n return JsonResponse(json.dumps([serializer_distance(location) for location in locations]), safe=False)",
"def get_distances_to_pose(self, x, y):\n\n distances = list()\n for node in self.top_map.nodes:\n distance = dict()\n distance['node'] = node\n distance['dist'] = math.hypot((x - node.pose.position.x), (y - node.pose.position.y))\n distances.append(distance)\n return sorted(distances, key=lambda k: k['dist'])",
"def point_list(self,res,llc,urc,direction):\n\t\tif direction == 2:\n\t\t\tZdist=urc[2]-llc[2]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([0,0,deltaZ*i]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]\n\t\tif direction == 1:\n\t\t\tZdist=urc[1]-llc[1]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([0,deltaZ*i,0]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]\n\t\tif direction == 0:\n\t\t\tZdist=urc[0]-llc[0]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([deltaZ*i,0,0]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]",
"def sortDistance(netlist):\n netlist_dictionary = {}\n for i in range(len(netlist)):\n start = chips[netlist[i][0]]\n end = chips[netlist[i][1]]\n\n delta_x = abs(start[0]-end[0])\n delta_y = abs(start[1]-end[1])\n distance = delta_x + delta_y\n\n netlist_dictionary[(netlist[i][0], netlist[i][1])] = distance\n\n sorted_dictionary = sorted(netlist_dictionary.items(), key=operator.itemgetter(1))\n sorted_netlist = []\n for j in range(len(sorted_dictionary)):\n sorted_netlist.append(sorted_dictionary[j][0])\n\n return sorted_netlist",
"def get_k_neighbors(self, point):\n nn = []\n nnl = []\n for p,l in zip(self.train_features,self.train_labels):\n d = self.distance_function(p,point)\n dl_pair = (d,l)\n nn.append(dl_pair)\n nn = sorted(nn, key = lambda x: x[0])\n for i in range(0,self.k):\n nnl.append(nn[i][1])\n return nnl\n raise NotImplementedError",
"def sort_segment_points(Aps, Bps):\n mid = []\n j = 0\n mid.append(Aps[0])\n for i in range(len(Aps)-1):\n dist = distance_tt_point(Aps[i], Aps[i+1])\n for m in range(j, len(Bps)):\n distm = distance_tt_point(Aps[i], Bps[m])\n if dist > distm:\n direction = dot(normalize(line(Aps[i].gen2arr(), Aps[i+1].gen2arr())), normalize(Bps[m].gen2arr()))\n if direction > 0:\n j = m + 1\n mid.append(Bps[m])\n break\n\n mid.append(Aps[i+1])\n for m in range(j, len(Bps)):\n mid.append(Bps[m])\n return mid",
"def _create_neighbor_distances(self):\n # --------------------------------\n # Create Directions from Point\n # --------------------------------\n diff = [[0 for _ in range(self._dim)]]\n curr = diff[0][:]\n for i in range(self._dim):\n # Each diff is a unit vector, only having one value at +1 or -1 and all others at 0.\n curr[i] = 1\n diff.append(curr[:])\n curr[i] = -1\n diff.append(curr[:])\n curr[i] = 0\n # Remove initial blank unit vector with all values at 0.\n diff.pop(0)\n del curr\n\n # --------------------------------\n # Breadth First Search\n # --------------------------------\n distances = []\n queue = [[0 for _ in range(self._dim)]]\n\n while queue:\n # Get latest distance\n curr = queue.pop()\n\n # The distance from any possible point should be less than or equal to the number of dimensions.\n # This can be shown using basic calculations.\n if self._metric(np.array(curr), np.zeros(shape=(len(curr),))) >= 2 * np.sqrt(self._dim) or \\\n np.any(np.abs(np.array(curr)) > self._extent / 2) or curr in distances:\n continue\n\n # Calculate all distances from child and add to queue\n queue.extend([list(np.array(curr) + np.array(diff[i])) for i in range(len(diff))])\n\n # Add current distance to distances\n distances.append(curr)\n\n # Return all possible neighbor distances\n return np.array(distances, dtype=int)",
"def reverse_geolocate(\n cls, lat: float, lon: float, weighted: bool = False\n ) -> List[Place]:\n formatted_point = cls._format_point_postgis(lat, lon)\n distance = cls._postgis_distance(formatted_point)\n\n ordering = (distance + 1) / (Place.popularity + 1) if weighted else distance\n\n query = (\n cls.query.with_entities(cls, distance)\n .filter(cls._postgis_buffered_intersect(formatted_point))\n .order_by(ordering)\n .limit(DEFAULT_LIMIT)\n .all()\n )\n return cls._set_distances(query)",
"def distances_to(self, pt):\n d = [pt.distance(a) for a in self]\n return np.array(d)",
"def closest_points(self, points, maxdist=None):\n return [self.closest_point(point, maxdist) for point in points]",
"def points (p, line: str) -> list:\n direction = line [0]\n steps = list (range (1, 1 + int (F.tail (line))))\n return F.map (point (p, direction)) (steps)",
"def quicksort_from_pos(dataset, lat, lng) -> List[dict]:\n\tdist_from_x = calculateDistance(lat, lng)\n\tadd_dist_to_dataset(dataset, dist_from_x)\n\treturn quicksort(dataset, \"dist\")"
]
| [
"0.7020484",
"0.63203174",
"0.62495136",
"0.5769085",
"0.5747792",
"0.56788474",
"0.5570238",
"0.54841065",
"0.53662777",
"0.5289484",
"0.52861816",
"0.5244743",
"0.52408653",
"0.5218816",
"0.5199385",
"0.5181247",
"0.5177152",
"0.5161949",
"0.5161203",
"0.5150489",
"0.51313776",
"0.5081817",
"0.50730395",
"0.50374687",
"0.5034316",
"0.50340456",
"0.50201964",
"0.5013406",
"0.49879783",
"0.49765942"
]
| 0.657288 | 1 |
Returns the angular distance between two points | def angular_distance( lat1, lon1, lat2, lon2 ):
pi_180 = pi / 180
return acos( cos( lat1 * pi_180 ) * cos( lon1 * pi_180 ) * cos( lat2 * pi_180) * cos( lon2 * pi_180 ) +
cos( lat1 * pi_180) * sin( lon1 * pi_180 ) * cos( lat2 * pi_180) * sin( lon2 * pi_180 ) +
sin( lat1 * pi_180 ) * sin( lat2 * pi_180 )) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def angular_distance(lng1, lat1, lng2, lat2):\n\n phi1 = math.radians(lat1)\n phi2 = math.radians(lat2)\n\n d_phi = math.radians(lat2 - lat1)\n d_lmd = math.radians(lng2 - lng1)\n\n A = math.pow(math.sin(d_phi / 2), 2) + \\\n math.cos(phi1) * math.cos(phi2) * \\\n math.pow(math.sin(d_lmd / 2), 2)\n\n return 2 * math.atan2(math.sqrt(A), math.sqrt(1 - A))",
"def get_distance(pt1,pt2):\r\n x1 = pt1[1]\r\n y1 = pt1[0]\r\n x2 = pt2[1]\r\n y2 = pt2[0]\r\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return d",
"def getDistanceBetweenTwoPoints(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)",
"def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)",
"def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))",
"def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2",
"def _angular_distance(mesh, face1, face2): # 其实是余弦距离\n angular_distance = (1 - _list_cos(face1.normal, face2.normal))\n if _list_multiplication(face1.normal, (_list_minus(_face_center(mesh, face2), _face_center(mesh, face1)))) < 0:\n # convex angles are not that bad so scale down distance a bit\n # 凸角不是那么糟糕,所以要把距离缩小一些。\n angular_distance *= eta\n return angular_distance",
"def ang_diff(self, theta1, theta2):\n\n return (theta1 - theta2 + np.pi) % (2 * np.pi) - np.pi",
"def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)",
"def angular_separation(r1: np.ndarray, r2: np.ndarray) -> float:\n # First compute the rotation that maps r1 to r2.\n dr = r2 @ r1.transpose()\n # Then extract the angle.\n _, angle = transforms3d.axangles.mat2axangle(dr)\n # Normalise the angle.\n if angle > np.pi:\n angle = 2 * np.pi - angle\n\n # Return the angle in degrees.\n return angle * 180 / np.pi",
"def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )",
"def distanceTwoPoints(self,A,B):\n #productive\n # used by addNeedleToScene\n profprint()\n length = ( (A[0]-B[0])**2 + (A[1]-B[1])**2 + (A[2]-B[2])**2 ) ** 0.5\n return length",
"def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))",
"def dist(pt1, pt2):\n return np.sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2)",
"def getDistance(p1, p2):\n\tdist = la.norm(p2 - p1)\n\treturn dist",
"def _angle_between(self, point_1, point_2):\n angle_1 = math.atan2(point_1.y, point_1.x)\n angle_2 = math.atan2(point_2.y, point_2.x)\n return angles.shortest_angular_distance(angle_1, angle_2)",
"def distance_between_points(a: Point, b: Point) -> float:\n return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)",
"def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))",
"def get_distance_between(self, p1, p2):\n\t\treturn math.sqrt(math.pow((p1.x - p2.x), 2) + math.pow((p1.y - p2.y), 2))",
"def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))",
"def dist(v1: vect2d, v2: vect2d) -> float:\n d = ((v2.x - v1.x)**2 + (v2.y - v1.y)**2) ** 0.5\n return d",
"def distance_between_points(p1,p2):\n return math.sqrt((p2.x-p1.x)**2+(p2.y-p1.y)**2)",
"def getDistance(point1,point2):\n dx = point2[0]-point1[0]\n dy = point2[1]-point1[1]\n return math.sqrt(dy*dy + dx*dx)",
"def angular_distance(self, z):\n\n return self.proper_distance(z) / (1 + z)",
"def dist(pose1, pose2):\n # type: (Pose, Pose) -> float\n dpose = pose2 - pose1\n dpose[2] = min(dpose[2], 2*np.pi - dpose[2])\n return np.sqrt(np.sum(dpose**2))",
"def distance_between_two_points(p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)",
"def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5",
"def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2",
"def get_distance(pose1, pose2):\n return math.sqrt((pose1.x-pose2.x)**2+(pose1.y-pose2.y)**2)",
"def getDistance(self,p1,p2):\n return sum([(p1[i]-p2[i])**2 for i in range(2)])"
]
| [
"0.74848044",
"0.69298506",
"0.6856045",
"0.6851529",
"0.6850949",
"0.6821571",
"0.6805049",
"0.68026394",
"0.6726844",
"0.67224205",
"0.6722374",
"0.671347",
"0.67133456",
"0.6711086",
"0.6701511",
"0.66720694",
"0.667153",
"0.66648185",
"0.666308",
"0.66407585",
"0.6630755",
"0.6607827",
"0.6606064",
"0.65956223",
"0.6594292",
"0.6567264",
"0.65546876",
"0.6552267",
"0.6543722",
"0.6527143"
]
| 0.7870642 | 0 |
Add the close image to this button. | def __add_icon_to_button(self):
self.set_relief(gtk.RELIEF_NONE)
icon_box = gtk.HBox(False, 0)
image = gtk.Image()
image.set_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
settings = gtk.Widget.get_settings(self)
width, height = gtk.icon_size_lookup_for_settings(settings, gtk.ICON_SIZE_MENU)
gtk.Widget.set_size_request(self, width + 0, height + 2)
icon_box.pack_start(image, True, False, 0)
self.add(icon_box)
image.show()
icon_box.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def click_close_button(self):\n self.click_img(target_img=SETTINGS['img_paths']['buttons']['close'])",
"def createCloseButton(self, parent):\n return Button(parent, Message.LABEL_BUTTON_CLOSE, Icon.ACTION_CLOSE, \n command=self.close)",
"def closeImage(j):\n displayMessage(j, 'j.CloseImage()')\n j.CloseImage()",
"def OnClose(self, event):\n self.OnIconize(event, True)",
"def close(self):\n self.image.close()",
"def click_exit_button(self):\n self.click_img(target_img=SETTINGS['img_paths']['buttons']['exit'])",
"def landlord_button_close(self):\n return self.write({'state': 'close'})",
"def close(self):\n return _image.image_close(self)",
"def onBtnCloseClicked(self):\n self.close()",
"def CloseButton(self, visible=True):\r\n \r\n return self.SetFlag(self.buttonClose, visible)",
"def hideBtnImg(*args, **kwargs):\n\targs[0].get_image().hide()",
"def activate_statusbar_icon_close():\n pass",
"def on_pushButton_only_close_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError",
"def closeEvent(self, event):\n\n\t\tevent.ignore()\n\t\tself.hide()\n\t\tself.__sys_tray_icon.show()",
"def addExitButton(self):\n exitAction = QAction(self.getQIcon('exit.png'), 'Exit the Application', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip(\"Exit the Application.\")\n exitAction.triggered.connect(QtCore.QCoreApplication.instance().quit)\n \n self.addAction(exitAction)",
"def setup_button_stop(self):\n stop_icon = tk.PhotoImage(file = self.stop_icon)\n self.button_stop = tk.Button(\n self.toolbar,\n width = 24,\n height = 24,\n image = stop_icon,\n command=self.reset_world)\n self.button_stop.image = stop_icon\n self.button_stop.grid(row = 0, column = 4, sticky=tk.W)",
"def add_Exit_Button(self):\n exit_button = Button(text=\"Exit\", font_size =\"20sp\", background_color =(1, 1, 1, 1), color =(1, 1, 1, 1), size =(32, 32), size_hint =(.3, .3)) #, pos =(300, 250)\n exit_button.bind(on_release = lambda a: self.exit())\n self.layout.add_widget(exit_button)",
"def close(event):\n event.widget.destroy()",
"def uiClearImage(self):\n\n\t\treturn self.__uiClearImage",
"def release_click_options_button(event):\n img_options_button_release_click = PhotoImage(\n file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\options_raised_active.png\")\n lbl_options.config(image=img_options_button_release_click)\n lbl_options.image = img_options_button_release_click\n lbl_options.grid(row=16, column=1, columnspan=8, pady=6)",
"def close(self):\n self._command = \"close\"",
"def uiClearClickedImage(self):\n\n\t\treturn self.__uiClearClickedImage",
"def icon(self):",
"def click_close_modal_content_button(self):\n self._basket.click_close_modal_content_button()",
"def image_window_destroy(self, widget, data=None):\n self._quit()",
"def close_UI(self):",
"def setBtnIcon(self):\n self.setIcon(QtGui.QIcon(self.movie.currentPixmap()))\n self.setIconSize(QtCore.QSize(self.size[0], self.size[1]))",
"def icon(self):\n return None",
"def icon(self):\n return None",
"def _close_figure(self):\n if self.disp_images:\n plt.show()\n else:\n plt.close()"
]
| [
"0.78842396",
"0.6995919",
"0.65193367",
"0.63515306",
"0.63302755",
"0.63203615",
"0.62561536",
"0.61974",
"0.61519027",
"0.60773885",
"0.6056336",
"0.6036248",
"0.59958935",
"0.5840996",
"0.5732481",
"0.5679623",
"0.5643048",
"0.56095254",
"0.5570533",
"0.55596006",
"0.55434185",
"0.55283695",
"0.55161244",
"0.54993755",
"0.54767036",
"0.54561234",
"0.54045916",
"0.53888524",
"0.53888524",
"0.5368901"
]
| 0.72414094 | 1 |
Run redmapper on a single healpix pixel. This method will check if files already exist, and will skip any steps that already exist. The border radius will automatically be calculated based on the richest possible cluster at the lowest possible redshift. All files will be placed in self.config.outpath (see self.__init__) | def run(self):
# need to think about outpath
# Make sure all files are here and okay...
if not self.config.galfile_pixelized:
raise ValueError("Code only runs with pixelized galfile.")
self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)
# Compute the border size
self.config.border = self.config.compute_border()
self.config.d.hpix = [self.pixel]
self.config.d.nside = self.nside
self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)
# Do the run
self.config.start_file_logging()
self.config.logger.info("Running redMaPPer on pixel %d" % (self.pixel))
firstpass = RunFirstPass(self.config)
if not os.path.isfile(firstpass.filename):
firstpass.run()
firstpass.output(savemembers=False, withversion=False)
else:
self.config.logger.info("Firstpass file %s already present. Skipping..." % (firstpass.filename))
self.config.catfile = firstpass.filename
# Clear out the firstpass memory
del firstpass
like = RunLikelihoods(self.config)
if not os.path.isfile(like.filename):
like.run()
like.output(savemembers=False, withversion=False)
else:
self.config.logger.info("Likelihood file %s already present. Skipping..." % (like.filename))
self.config.catfile = like.filename
# Clear out the likelihood memory
del like
perc = RunPercolation(self.config)
if not os.path.isfile(perc.filename):
perc.run()
perc.output(savemembers=True, withversion=False)
else:
self.config.logger.info("Percolation file %s already present. Skipping..." % (perc.filename))
self.config.stop_file_logging() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True, check_bkgfile_components=False, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n\n self.config.logger.info(\"Running runcat on pixel %d\" % (self.pixel))\n\n runcat = RunCatalog(self.config)\n if not os.path.isfile(runcat.filename):\n runcat.run(do_percolation_masking=self.config.runcat_percolation_masking)\n runcat.output(savemembers=True, withversion=True)\n\n self.config.stop_file_logging()",
"def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True,\n check_parfile=True, check_randfile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n self.config.start_file_logging()\n self.config.logger.info(\"Running zmask on pixel %d\" % (self.pixel))\n\n rand_zmask = RunRandomsZmask(self.config)\n\n if not os.path.isfile(rand_zmask.filename):\n rand_zmask.run()\n rand_zmask.output(savemembers=False, withversion=False)\n\n # All done\n self.config.stop_file_logging()",
"def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n\n self.config.logger.info(\"Running zscan on pixel %d\" % (self.pixel))\n\n runzscan = RunZScan(self.config)\n if not os.path.isfile(runzscan.filename):\n runzscan.run()\n runzscan.output(savemembers=True, withversion=True)\n\n self.config.stop_file_logging()",
"def main():\n base_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir,\n )\n default_output_path = os.path.join(base_dir, \"output\", \"out.png\")\n default_texture_path = os.path.join(base_dir, \"textures\", \"grid.png\")\n\n default_options = {\n \"resolution\": (1512, 762),\n \"texture_path\": default_texture_path,\n \"output_path\": default_output_path,\n \"iterations\": 200, # Increase this for good results\n \"camera_position\": [3.1, 1.570796, 0.],\n \"num_processes\": multi.cpu_count(),\n \"chunk_size\": 9000,\n \"gain\": 1,\n \"normalize\": 0,\n \"spin\": 0.7,\n }\n args = parse_args(default_options)\n\n output_path = os.path.dirname(args.output_path)\n if not os.path.exists(output_path):\n print(\"Error: Output path does not exist at:\")\n print(args.output_path)\n print(\"Create the directory or change the path then try again.\")\n print_help_and_exit()\n\n\n try:\n texture = spm.imread(args.texture_path)\n except FileNotFoundError as error:\n print(error)\n print(\"Error: Texture file not found at:\")\n print(args.texture_path)\n print_help_and_exit()\n\n # Convert to float to work in linear colour space\n texture = convert_image_to_float(texture)\n if not args.no_srgb:\n # Convert to sRGB before resizing for correct results\n srgbtorgb(texture)\n\n texture = convert_image_to_float(\n spm.imresize(texture, 2.0, interp=\"bicubic\"),\n )\n\n black_hole = KerrBlackHole(args.spin)\n raytracer = KerrRaytracer(\n black_hole,\n args.camera_position,\n texture,\n args.resolution,\n args.iterations,\n args.num_processes,\n args.chunk_size,\n shuffle=not args.disable_shuffle,\n )\n raytracer.generate_image()\n print(\"Raytracing Completed Succesfully.\")\n print(\n \"Total raytracing time:\",\n datetime.timedelta(seconds=(time.time() - raytracer.start_time)),\n )\n\n colour = post_process(raytracer.colour_buffer_preproc, args.gain, args.normalize)\n\n save_to_img(\n colour,\n args.output_path,\n args.resolution,\n srgb_out=not args.no_srgb,\n )",
"def crc_map(self, map_file, *, format='PNG', pix_width=2048, alt_crc_name=None):\n\n result_dir = os.path.join(self.result_dir, 'result') if _external_result_base_dir is None else self.result_dir\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n \n master_dir = os.path.join(self.result_dir, 'master')\n if not os.path.exists(master_dir):\n os.makedirs(master_dir)\n\n file_part = os.path.split(map_file)[1]\n image_result_file = os.path.join(result_dir, \"{}.png\".format(file_part))\n xml_result_file = os.path.join(result_dir, \"{}.xml\".format(file_part))\n\n map_result_file = os.path.join(result_dir, \"{}\".format(file_part))\n self._map_to_results(map_file, xml_result_file, image_result_file, map_result_file, format, pix_width)\n\n xml_result_file_catalog = xml_result_file + '.catalog.xml'\n if os.path.exists(xml_result_file_catalog):\n os.remove(xml_result_file_catalog)\n\n file_name_part = file_part.split('.')[0]\n\n if alt_crc_name is None:\n alt_crc_name = gxsys.func_name(1)\n\n result_files = glob.glob(xml_result_file + '*')\n for result in result_files:\n self._agnosticize_and_ensure_consistent_line_endings(result, file_name_part, alt_crc_name)\n\n if alt_crc_name:\n alt_file_part = file_part.replace(file_name_part, alt_crc_name)\n alt_image_result_file = os.path.join(result_dir, \"{}.png\".format(alt_file_part))\n alt_xml_result_file = os.path.join(result_dir, \"{}.xml\".format(alt_file_part))\n alt_map_result_file = os.path.join(result_dir, \"{}\".format(alt_file_part))\n\n shutil.move(image_result_file, alt_image_result_file)\n shutil.move(map_result_file, alt_map_result_file)\n\n result_files = glob.glob(xml_result_file + '*')\n for result in result_files:\n result_file_part = os.path.split(result)[1]\n alt_result = os.path.join(result_dir, result_file_part.replace(file_name_part, alt_crc_name))\n shutil.move(result, alt_result)\n\n image_result_file = alt_image_result_file\n map_result_file = alt_map_result_file\n xml_result_file = alt_xml_result_file\n image_master_file = os.path.join(master_dir, \"{}.png\".format(alt_file_part))\n map_master_file = os.path.join(master_dir, \"{}\".format(alt_file_part))\n xml_master_file = os.path.join(master_dir, \"{}.xml\".format(alt_file_part))\n else:\n image_master_file = os.path.join(master_dir, \"{}.png\".format(file_part))\n map_master_file = os.path.join(master_dir, \"{}\".format(file_part))\n xml_master_file = os.path.join(master_dir, \"{}.xml\".format(file_part))\n\n if _external_result_base_dir is not None:\n return\n\n xml_result_part = os.path.join('result', os.path.split(xml_result_file)[1])\n xml_master_part = os.path.join('master', os.path.split(xml_master_file)[1])\n xml_result_files = glob.glob(map_result_file + '*')\n xml_master_files = glob.glob(map_master_file + '*')\n\n if SHOW_TEST_VIEWERS:\n gxvwr.view_document(map_file, env={'GEOSOFT_FORCE_MESA_3D': '0'})",
"def _run(evaluation_dir_name, smoothing_radius_grid_cells,\n score_colour_map_name, num_ex_colour_map_name, max_colour_percentile,\n output_dir_name):\n\n if smoothing_radius_grid_cells <= 0:\n smoothing_radius_grid_cells = None\n\n score_colour_map_object = pyplot.get_cmap(score_colour_map_name)\n num_ex_colour_map_object = pyplot.get_cmap(num_ex_colour_map_name)\n error_checking.assert_is_geq(max_colour_percentile, 90.)\n error_checking.assert_is_leq(max_colour_percentile, 100.)\n\n grid_metafile_name = grids.find_equidistant_metafile(\n directory_name=evaluation_dir_name, raise_error_if_missing=True\n )\n\n print('Reading grid metadata from: \"{0:s}\"...'.format(grid_metafile_name))\n grid_metadata_dict = grids.read_equidistant_metafile(grid_metafile_name)\n print(SEPARATOR_STRING)\n\n num_grid_rows = len(grid_metadata_dict[grids.Y_COORDS_KEY])\n num_grid_columns = len(grid_metadata_dict[grids.X_COORDS_KEY])\n\n auc_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n csi_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n pod_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n far_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan)\n num_examples_matrix = numpy.full(\n (num_grid_rows, num_grid_columns), 0, dtype=int\n )\n num_positive_examples_matrix = numpy.full(\n (num_grid_rows, num_grid_columns), 0, dtype=int\n )\n\n for i in range(num_grid_rows):\n for j in range(num_grid_columns):\n this_eval_file_name = model_eval.find_file(\n directory_name=evaluation_dir_name, grid_row=i, grid_column=j,\n raise_error_if_missing=False)\n\n if not os.path.isfile(this_eval_file_name):\n warning_string = (\n 'Cannot find file (this may or may not be a problem). '\n 'Expected at: \"{0:s}\"'\n ).format(this_eval_file_name)\n\n warnings.warn(warning_string)\n continue\n\n print('Reading data from: \"{0:s}\"...'.format(this_eval_file_name))\n this_evaluation_dict = model_eval.read_evaluation(\n this_eval_file_name)\n\n num_examples_matrix[i, j] = len(\n this_evaluation_dict[model_eval.OBSERVED_LABELS_KEY]\n )\n num_positive_examples_matrix[i, j] = numpy.sum(\n this_evaluation_dict[model_eval.OBSERVED_LABELS_KEY]\n )\n\n this_evaluation_table = this_evaluation_dict[\n model_eval.EVALUATION_TABLE_KEY]\n\n auc_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.AUC_KEY].values\n )\n csi_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.CSI_KEY].values\n )\n pod_matrix[i, j] = numpy.nanmean(\n this_evaluation_table[model_eval.POD_KEY].values\n )\n far_matrix[i, j] = 1. 
- numpy.nanmean(\n this_evaluation_table[model_eval.SUCCESS_RATIO_KEY].values\n )\n\n print(SEPARATOR_STRING)\n\n auc_matrix[num_positive_examples_matrix == 0] = numpy.nan\n csi_matrix[num_positive_examples_matrix == 0] = numpy.nan\n pod_matrix[num_positive_examples_matrix == 0] = numpy.nan\n far_matrix[num_positive_examples_matrix == 0] = numpy.nan\n\n if smoothing_radius_grid_cells is not None:\n print((\n 'Applying Gaussian smoother with e-folding radius of {0:.1f} grid '\n 'cells...'\n ).format(\n smoothing_radius_grid_cells\n ))\n\n orig_num_examples_matrix = num_examples_matrix + 0\n num_examples_matrix = general_utils.apply_gaussian_filter(\n input_matrix=num_examples_matrix.astype(float),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n num_examples_matrix = numpy.round(num_examples_matrix).astype(int)\n num_examples_matrix[orig_num_examples_matrix == 0] = 0 # HACK\n\n num_positive_examples_matrix = general_utils.apply_gaussian_filter(\n input_matrix=num_positive_examples_matrix.astype(float),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n num_positive_examples_matrix = (\n numpy.round(num_positive_examples_matrix).astype(int)\n )\n num_positive_examples_matrix[num_examples_matrix == 0] = 0\n\n auc_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(auc_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n csi_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(csi_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n pod_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(pod_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n far_matrix = general_utils.apply_gaussian_filter(\n input_matrix=ge_utils.fill_nans(far_matrix),\n e_folding_radius_grid_cells=smoothing_radius_grid_cells\n )\n\n auc_matrix[num_positive_examples_matrix == 0] = numpy.nan\n csi_matrix[num_positive_examples_matrix == 0] = numpy.nan\n pod_matrix[num_positive_examples_matrix == 0] = numpy.nan\n far_matrix[num_positive_examples_matrix == 0] = numpy.nan\n\n panel_file_names = []\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name)\n\n # Plot number of examples.\n this_data_matrix = numpy.maximum(numpy.log10(num_examples_matrix), 0.)\n this_data_matrix[this_data_matrix == 0] = numpy.nan\n max_colour_value = numpy.nanpercentile(\n this_data_matrix, max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=this_data_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=num_ex_colour_map_object,\n min_colour_value=0., max_colour_value=max_colour_value,\n plot_cbar_min_arrow=False, plot_cbar_max_arrow=True, log_scale=True)\n\n axes_object.set_title(r'Number of examples')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(a)')\n\n panel_file_names.append('{0:s}/num_examples.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot number of positive examples.\n this_data_matrix = num_positive_examples_matrix.astype(float)\n this_data_matrix[this_data_matrix == 0] = numpy.nan\n\n max_colour_value = numpy.nanpercentile(\n this_data_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n this_data_matrix, 100. 
- max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=this_data_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=num_ex_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=True, plot_cbar_max_arrow=True)\n\n axes_object.set_title('Number of tornadic examples')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(b)')\n\n panel_file_names.append(\n '{0:s}/num_positive_examples.jpg'.format(output_dir_name)\n )\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot AUC.\n max_colour_value = numpy.nanpercentile(auc_matrix, max_colour_percentile)\n min_colour_value = numpy.maximum(\n numpy.nanpercentile(auc_matrix, 100. - max_colour_percentile),\n 0.5\n )\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=auc_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=True, plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('AUC (area under ROC curve)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(c)')\n\n panel_file_names.append('{0:s}/auc.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot CSI.\n max_colour_value = numpy.nanpercentile(csi_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n csi_matrix, 100. - max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=csi_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('CSI (critical success index)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(d)')\n\n panel_file_names.append('{0:s}/csi.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot POD.\n max_colour_value = numpy.nanpercentile(pod_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n pod_matrix, 100. 
- max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=pod_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('POD (probability of detection)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(e)')\n\n panel_file_names.append('{0:s}/pod.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Plot FAR.\n max_colour_value = numpy.nanpercentile(far_matrix, max_colour_percentile)\n min_colour_value = numpy.nanpercentile(\n far_matrix, 100. - max_colour_percentile)\n\n figure_object, axes_object = _plot_one_value(\n data_matrix=far_matrix, grid_metadata_dict=grid_metadata_dict,\n colour_map_object=score_colour_map_object,\n min_colour_value=min_colour_value, max_colour_value=max_colour_value,\n plot_cbar_min_arrow=min_colour_value > 0.,\n plot_cbar_max_arrow=max_colour_value < 1.)\n\n axes_object.set_title('FAR (false-alarm ratio)')\n plotting_utils.label_axes(axes_object=axes_object, label_string='(f)')\n\n panel_file_names.append('{0:s}/far.jpg'.format(output_dir_name))\n print('Saving figure to: \"{0:s}\"...'.format(panel_file_names[-1]))\n\n figure_object.savefig(\n panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,\n bbox_inches='tight')\n pyplot.close(figure_object)\n\n # Concatenate panels.\n concat_file_name = '{0:s}/spatially_subset_evaluation.jpg'.format(\n output_dir_name)\n print('Concatenating panels to: \"{0:s}\"...'.format(concat_file_name))\n\n imagemagick_utils.concatenate_images(\n input_file_names=panel_file_names, output_file_name=concat_file_name,\n num_panel_rows=NUM_PANEL_ROWS, num_panel_columns=NUM_PANEL_COLUMNS)\n\n imagemagick_utils.resize_image(\n input_file_name=concat_file_name, output_file_name=concat_file_name,\n output_size_pixels=CONCAT_FIGURE_SIZE_PX)",
"def main(config):\n file_paths_info = [('GLOFRIS','WATCH','ARG_inunriver_historical_000000000WATCH_1980_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP45','ARG_inunriver_rcp4p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP85','ARG_inunriver_rcp8p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('FATHOM','AR_fluvial_undefended_merged','AR-FU-1000.tif'),\n \t\t\t\t('FATHOM','AR_pluvial_undefended_merged','AR-PU-1000.tif')\n \t\t\t\t]\n figure_names = ['GLOFRIS-WATCH-fluvial','GLOFRIS-RCP45-fluvial','GLOFRIS-RCP85-fluvial','FATHOM-fluvial','FATHOM-pluvial']\n figure_titles = ['current fluvial flooding','RCP4.5 fluvial flooding','RCP8.5 fluvial flooding','current fluvial flooding','current pluvial flooding']\n for f_i in range(len(file_paths_info)):\n\t hazard_file = os.path.join(config['paths']['data'],'flood_data', file_paths_info[f_i][0],file_paths_info[f_i][1],file_paths_info[f_i][2])\n\t output_file = os.path.join(config['paths']['figures'], 'flood-map-{}.png'.format(figure_names[f_i]))\n\t ax = get_axes()\n\t plot_basemap(ax, config['paths']['data'])\n\t scale_bar(ax, location=(0.8, 0.05))\n\t plot_basemap_labels(ax, config['paths']['data'], include_regions=True,include_zorder=3)\n\n\t proj_lat_lon = ccrs.PlateCarree()\n\n\n\t # Create color map\n\t colors = plt.get_cmap('Blues')\n\n\t # Read in raster data\n\t data, lat_lon_extent = get_data(hazard_file)\n\t data[(data <= 0) | (data > 5)] = np.nan\n\t max_val = np.nanmax(data)\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val)\n\n\t # Plot population data\n\t im = ax.imshow(data, extent=lat_lon_extent,transform=proj_lat_lon, cmap=colors,norm =norm, zorder=2)\n\n\t # Add colorbar\n\t cbar = plt.colorbar(im, ax=ax,fraction=0.1, shrink=0.87,pad=0.01, drawedges=False, orientation='horizontal',\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val), ticks=list(np.linspace(0,max_val,3)))\n\t cbar.set_clim(vmin=0,vmax=max_val)\n\n\n\t cbar.outline.set_color(\"none\")\n\t cbar.ax.yaxis.set_tick_params(color='black')\n\t cbar.ax.set_xlabel('Flood depths (m)',fontsize=12,color='black')\n\n\t plt.title('1 in 1000 year {}'.format(figure_titles[f_i]), fontsize = 14)\n\t save_fig(output_file)\n\t plt.close()",
"def Transform(self, src_dir, dst_dir, funneled_dir=None):\r\n R = Rectifier()\r\n\r\n # i = 0\r\n\r\n start = time.time()\r\n for root, dirs, files in os.walk(src_dir):\r\n \r\n new_path = root.replace(src_dir, dst_dir)\r\n \r\n if (not os.path.exists(new_path)):\r\n os.mkdir(new_path)\r\n \r\n if files != []:\r\n \r\n for file in files:\r\n img = cv2.imread(os.path.join(root, file))\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n theta = R.estimate_rot(gray)\r\n img = R.rectify(gray, theta)\r\n \r\n index = re.search('_\\d\\d\\d\\d.jpg', file)\r\n self.logger.info(root + file + \" : {}\".format(theta))\r\n cv2.imwrite(os.path.join(new_path, file[:index.start()+5] + \"_rectified.jpg\"), img)\r\n \r\n if not funneled_dir is None:\r\n name = file[:index.start()]\r\n new_file_name = file[:index.start()+5] + \"_funneled.jpg\"\r\n funneled_img = cv2.imread(os.path.join(funneled_dir, name, file))\r\n funneled_img = cv2.cvtColor(funneled_img, cv2.COLOR_BGR2GRAY)\r\n funneled_img = R.rectify(funneled_img)\r\n cv2.imwrite(os.path.join(new_path, new_file_name), funneled_img) \r\n \r\n # i += 1\r\n # if (i >= 20):\r\n # break\r\n \r\n print(\"== {:.2f} min has elasped ==\".format((time.time()-start)/60))",
"def form_sample_folder(self, input_folder, target_folder, sample_name):\n print(f'processing {sample_name} folder.')\n # first make a subfolder to contain the images - e.g. 'target_folder/sample_name'\n sample_dir = join(target_folder, sample_name)\n if not os.path.exists(sample_dir):\n mkdir(sample_dir)\n # resize and move the mask images - e.g. 'target_folder/sample_name/imgs_necrosis.png'\n img_file_nec = join(input_folder, 'Necrosis',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_nec, self.rescale_ratio)\n img_nec = img_res.copy()\n cv2.imwrite(join(sample_dir, 'necrosis.png'), img_res)\n\n img_file_perf = join(input_folder, 'Perfusion',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_perf, self.rescale_ratio)\n cv2.imwrite(join(sample_dir, 'perfusion.png'), img_res)\n\n # resize and move the maker HE and EF5 images\n files = listdir(input_folder)\n img_files = [x for x in files if x.split(\n '.')[-1] in ('tif', 'jpg', 'png')]\n for img_file in img_files:\n if (sample_name+'_' in img_file) or (sample_name+'-' in img_file):\n if ('HE-G' in img_file) or ('HE-green' in img_file) or ('HEgreen' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-green.png')):\n cv2.imwrite(join(sample_dir, 'HE-green.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-R' in img_file) or ('HE-red' in img_file) or ('HEred' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-red.png')):\n cv2.imwrite(join(sample_dir, 'HE-red.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-B' in img_file) or ('HE-blue' in img_file) or ('HE-blue' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-blue.png')):\n cv2.imwrite(join(sample_dir, 'HE-blue.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif 'EF5' in img_file:\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n img_ef5 = img_res.copy()\n if not os.path.exists(join(sample_dir, 'EF5.png')):\n cv2.imwrite(join(sample_dir, 'EF5.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n\n masked_ef5 = (img_ef5 * (img_nec <= 0)).astype(img_ef5.dtype)\n cv2.imwrite(join(sample_dir, 'EF5_masked.png'), masked_ef5)\n assert len(listdir(sample_dir)) == 7\n return",
"def process(self, step_guess_orientation=True, step_advanced_alignement=True,\n step_gen_worldfiles=True, step_load_worldfiles=True,\n step_gen_vrts=True, step_load_vrts=True,\n step_load_debug=True ):\n\n QgsMessageLog.logMessage(\"1/ Instantiating all images...\", \"QuickDroneMap\", 0)\n for root, dirs, files in os.walk(self.folder):\n for file in files:\n if file.endswith(\".jpg\") or file.endswith(\".JPG\"):\n image_path = os.path.join(root, file)\n image = Image(self, image_path)\n self.images.append(image)\n self.images = self.images[70:90]\n # for i in [301,300,329]: # 3 images, transform fails on all of them\n # for i in [397,398,364]: # 3 images, transform fails on one of them\n # for i in [377,380,381]: # 3 images, transform works on all of them\n # path = \"C:\\\\Users\\\\Olivier\\\\Dropbox\\\\Affaires\\\\SPC\\\\Sources\\\\quickdronemap\\\\test\\\\data\\\\DJI_{0:04d}.JPG\".format(i)\n # self.images.append(Image(self, path))\n\n QgsMessageLog.logMessage(\"2/ Assigning ids\", \"QuickDroneMap\", 0)\n for i, image in enumerate(self.images):\n image.id = i\n\n\n QgsMessageLog.logMessage(\"2/ Loading image attributes and parsing exif tags...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.set_attributes()\n\n if step_guess_orientation:\n QgsMessageLog.logMessage(\"3/ Building image sequences...\", \"QuickDroneMap\", 0)\n sorted_images = sorted(self.images, key=lambda x: x.timestamp)\n for i in range(len(sorted_images)):\n\n prev_image = sorted_images[i-1] if i>0 else None\n image = sorted_images[i]\n next_image = sorted_images[i+1] if i<len(sorted_images)-1 else None\n\n if prev_image is None or next_image is None:\n continue\n\n angle_p_i = math.atan2(image.point.x()-prev_image.point.x(),-image.point.y()+prev_image.point.y())\n angle_i_n = math.atan2(next_image.point.x()-image.point.x(),-next_image.point.y()+image.point.y())\n\n # Checking if the three images are aligned (if not, we're probably at an angle)\n dA = absolute_angle_difference(angle_p_i, angle_i_n)\n if dA > ANGLE_THRESHOLD:\n continue\n\n # Checking if the three images are near enough timewise, if not, it could be separate flights\n dT1 = image.timestamp - prev_image.timestamp\n dT2 = next_image.timestamp - image.timestamp\n if dT1 > TIME_THRESHOLD or dT2 > TIME_THRESHOLD:\n continue\n\n prev_image.next_image = image\n image.prev_image = prev_image\n image.next_image = next_image\n next_image.prev_image = image\n\n QgsMessageLog.logMessage(\"4/ Deriving orientation from image sequence\", \"QuickDroneMap\", 0)\n for image in self.images:\n # if the direction wasn't set in the Exif tags, we derive it from the image sequences\n if image.direction is None:\n img_a = image.prev_image or image \n img_b = image.next_image or image\n image.angle = math.atan2(img_b.point.x()-img_a.point.x(),-img_b.point.y()+img_a.point.y())\n\n if step_advanced_alignement:\n QgsMessageLog.logMessage(\"5/ Building image neighbourhood graph...\", \"QuickDroneMap\", 0)\n from scipy.spatial import Delaunay\n points = [(i.point.x(),i.point.y()) for i in self.images]\n triangulation = Delaunay(points)\n\n done = [[False for _i2 in self.images] for _i1 in self.images]\n for tri in triangulation.simplices:\n i1,i2,i3 = tri\n if not done[i1][i2]:\n e = Edge(self.images[i1], self.images[i2])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i2].edges.append(e)\n done[i1][i2] = True\n if not done[i1][i3]:\n e = Edge(self.images[i1], self.images[i3])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n 
self.images[i3].edges.append(e)\n done[i1][i3] = True\n if not done[i2][i3]:\n e = Edge(self.images[i2], self.images[i3])\n self.edges.append(e)\n self.images[i2].edges.append(e)\n self.images[i3].edges.append(e)\n done[i2][i3] = True\n\n QgsMessageLog.logMessage(\"6/ Computing similarities\", \"QuickDroneMap\", 0)\n for i, edge in enumerate(self.edges):\n QgsMessageLog.logMessage(\"Done {} out of {}\".format(i,len(self.edges)), \"QuickDroneMap\", 0)\n QApplication.processEvents()\n edge.compute_transform()\n\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # QgsMessageLog.logMessage(\"Initial fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n\n # print(\"TESTING QUALITY OF SIMILARITY (disable optimization to do this)\")\n # done = []\n # edges_to_delete = []\n # for edge in self.edges:\n # QApplication.processEvents()\n\n # if edge.imageA in done or edge.imageB in done:\n # edges_to_delete.append(edge)\n # continue\n\n # done.append(edge.imageA)\n # done.append(edge.imageB)\n\n # d_angle = edge.angle\n # edge.imageB.angle = edge.imageA.angle + d_angle\n\n # f_scale = edge.scale\n # edge.imageB.scale = edge.imageA.scale * f_scale\n\n # d_point = QgsPointXY(edge.tvec[0],edge.tvec[1])\n # d_point = d_point.rotated(edge.imageA.angle)\n # d_point *= edge.imageA.pixel_size/DOWNSCALING_FACTOR\n # edge.imageB.point = edge.imageA.point + d_point\n # for edge in edges_to_delete:\n # self.edges.remove(edge)\n\n\n # print(\"AFTER PROTOTYPE PLACEMENT\")\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # self.calculate_fitness(initial_guess_np)\n\n\n QgsMessageLog.logMessage(\"7/ Optimizing\", \"QuickDroneMap\", 0)\n QApplication.processEvents()\n\n initial_guess_np, bounds = self.get_initial_values_and_bounds() \n # res_1 = least_squares(calculate_fitness, initial_guess_np, bounds=([b[0] for b in bounds],[b[1] for b in bounds]))\n res_1 = minimize(self.calculate_fitness, initial_guess_np, bounds=bounds)\n\n for image in self.images:\n px = res_1.x[image.id*4+0]\n py = res_1.x[image.id*4+1]\n pa = res_1.x[image.id*4+2]\n ps = res_1.x[image.id*4+3]\n image.point = QgsPointXY(px, py)\n image.angle = pa\n image.psize = ps\n\n initial_guess_np, _ = self.get_initial_values_and_bounds()\n QgsMessageLog.logMessage(\"After optimization fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n \n QgsMessageLog.logMessage(\"8/ Computing all transforms...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.update_transform()\n\n if step_gen_worldfiles:\n QgsMessageLog.logMessage(\"9a/ Creating and loading worldfiles\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_worldfile()\n if step_load_worldfiles:\n image.load_worldfile(self.iface)\n\n if step_gen_vrts:\n QgsMessageLog.logMessage(\"9b/ Creating and loading vrts\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_vrt()\n if step_load_vrts:\n image.load_vrt(self.iface)\n\n if step_load_debug:\n QgsMessageLog.logMessage(\"10/ Creating debug jsons files\", \"QuickDroneMap\", 0)\n edg_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 32628}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.point.x(), edge.imageA.point.y()],[edge.imageB.point.x(), edge.imageB.point.y()]]\n props = {k:v for (k,v) in vars(edge).items()}\n props['angle_a'] = edge.imageA.angle\n props['angle_b'] = edge.imageB.angle\n feature = {\"type\": 
\"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n edg_data['features'].append(feature)\n \n edg_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(edg_data, edg_file, default=lambda o: str(o))\n edg_file.close()\n layer = self.iface.addVectorLayer(edg_file.name,\"[DEBUG] Edges\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_edges_style.qml'))\n \n graph_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 4326}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.lon, edge.imageA.lat],[edge.imageB.lon, edge.imageB.lat]]\n props = {k:v for (k,v) in vars(edge).items()}\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n graph_data['features'].append(feature)\n\n graph_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(graph_data, graph_file, default=lambda o: str(o))\n graph_file.close()\n layer = self.iface.addVectorLayer(graph_file.name,\"[DEBUG] Graph\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_graph_style.qml'))",
"def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return",
"def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)",
"def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]",
"def multiple(folder_name: str,\r\n min_plant_pixels: int = MIN_PLANT_SIZE,\r\n output_options = [['rows',\r\n 'centers',\r\n 'row_ids',\r\n 'distances'],\r\n \r\n ['rows',\r\n 'centers',\r\n 'row_ids',\r\n 'numbers'],\r\n \r\n ['dirt',\r\n 'ditches',\r\n 'rows',\r\n 'clusters',\r\n 'centers',\r\n 'row_ids',\r\n 'numbers',\r\n 'lines']\r\n ]) -> None:\r\n\r\n # Go to the specified folder\r\n ls = listdir(folder_name)\r\n ls = [join(folder_name, i) for i in ls]\r\n\r\n # Check if the folder exists\r\n if join(folder_name, 'Analysis') in ls:\r\n\r\n # If it does, rename the old folder\r\n new_name = join(folder_name, 'Analysis')\r\n while new_name in ls:\r\n new_name += '_old'\r\n \r\n rename(join(folder_name,'Analysis'), new_name)\r\n\r\n # Create new folders inside the given directory\r\n mkdir(join(folder_name, 'Analysis'))\r\n mkdir(join(folder_name, 'Analysis/Images'))\r\n mkdir(join(folder_name, 'Analysis/Data'))\r\n \r\n # Gather the images to be analysed\r\n co = 0\r\n pics = [j for j in ls if isfile(j)]\r\n le = len(pics)\r\n\r\n # Analyze each of the pictures\r\n for i in pics:\r\n\r\n # Make the field\r\n field = just_field(i, min_plant_pixels)\r\n\r\n # Measure the field and save results\r\n print('Saving data...\\n')\r\n ruler = Ruler(field)\r\n \r\n ruler.output_distances(\r\n join(folder_name,\r\n 'Analysis/Data/{}_Distances.csv'.format(basename(i).split('.')[0])\r\n ) \r\n )\r\n \r\n ruler.output_row_info(\r\n join(folder_name,\r\n 'Analysis/Data/{}_Rows.csv'.format(basename(i).split('.')[0])\r\n )\r\n )\r\n\r\n # Make and save visuals\r\n print('Saving pictures...\\n')\r\n for k in range(len(output_options)):\r\n output_options[k]\r\n img = field.make_visual(ruler, output_options[k])\r\n img.save(\r\n join(folder_name,\r\n 'Analysis/Images/{}_Visual_{}.png'.format(basename(i).split('.')[0], k + 1)))\r\n\r\n # Increment the progress meter\r\n co += 1\r\n print('Completed {}/{} images\\n\\n'.format(co, le))",
"def main():\n dpi = 1\n dpi = 2\n width = int(360)\n height = int(130)\n mywidth = int(width*dpi)\n myheight = int(height*dpi)\n FWHM = 7.5 # degrees\n FWHM = 10.0 # degrees\n FWHM = 5.0 # degrees\n FWHM = 3.0 # degrees\n FWHM = 1.0 # degrees\n weight = 1.\n\n nargs = len(sys.argv)\n if nargs < 2:\n print('GR: GRid Observations of integrated intensity produced by the T Command')\n print('GR produces fits images for each of the horns used for the observations.')\n print('For observations at the same coordinates, the ratios of intensities are also produced.')\n print('The FITS format files require header information, which is copied from the')\n print('Cold Load File provided by the user')\n print('GR RA|GAL <cold file name> <savefile1> [<savefile2> ... <savefileN>]')\n print(\"\")\n print('Glen Langston, National Science Foundation -- 20 May 12')\n exit()\n\n gridtype = sys.argv[1]\n gridtype = gridtype.upper()\n print('Grid Type: ', gridtype)\n\n # enable having ra going from 24 to 0 hours == 360 to 0 degrees\n xsign = 1.\n xoffset = 0.\n if gridtype == 'RA':\n xmin = 0.\n xmax = 360.\n ymin = -40.\n ymax = 90.\n maptype = 'RA'\n elif gridtype == '-RA':\n xmin = 0.\n xmax = 360.\n ymin = -40.\n ymax = 90.\n xsign = -1.\n xoffset = 360. # when x = 360. should be at zero.\n maptype = 'RA'\n elif gridtype == '-EL':\n xmin = 0.\n xmax = 360.\n ymin = 0.\n ymax = 90.\n xsign = -1.\n xoffset = 360. # when x = 360. should be at zero.\n maptype = 'AZEL'\n elif gridtype == 'RA0':\n xmin = 0.\n xmax = 360.\n ymin = -41.\n ymax = 89.\n xsign = -1.\n xoffset = 180. # when x = 360. should be at zero.\n gridtype = 'RA'\n elif gridtype == 'GAL':\n xmin = -180.\n xmax = 180.\n ymin = -90.\n ymax = 90.\n maptype = 'GAL'\n\n if gridtype != 'RA' and gridtype != 'GAL' and gridtype != '-RA' and gridtype != \"RA0\":\n print('Error parsing grid type: ', gridtype)\n print('1st argument should be either RA, -RA or GAL')\n exit()\n\n rs = radioastronomy.Spectrum()\n\n if doRatio: \n #create the grid with map parameters\n grid1 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \\\n height=height, dpi=dpi, FWHM=FWHM, \\\n projection=\"-CAR\", gridtype=maptype)\n grid2 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \\\n height=height, dpi=dpi, FWHM=FWHM, \\\n projection=\"-CAR\", gridtype=maptype)\n grid3 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \\\n height=height, dpi=dpi, FWHM=FWHM, \\\n projection=\"-CAR\", gridtype=maptype)\n grid4 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \\\n height=height, dpi=dpi, FWHM=FWHM, \\\n projection=\"-CAR\", gridtype=maptype)\n # put each telescope in a different grid\n grids = [grid1, grid2, grid3, grid4]\n\n gridall = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \\\n height=height, dpi=dpi, FWHM=FWHM, \\\n projection=\"-CAR\", gridtype=maptype)\n \n\n projection = \"-AIT\"\n# coldfile \n coldfile = sys.argv[2]\n# get telescope geographic location etc\n print(\"Reading Observing parameters from: %s\" % (coldfile))\n rs.read_spec_ast(coldfile)\n print(\"Observer: %s \" % (rs.observer))\n\n# first read through all data and find hot load\n names = sys.argv[3:]\n names = sorted(names)\n\n firsttime = \"\"\n lasttime = \"\"\n count = 0\n # setup grid indicies so that cpuIndex goes to the correct grid\n # This assumes telescopes 2,3,4,5 are being used] \n gridIndex = [0,0,0,1,2,3]\n # for all save Files to Grid\n for filename in names:\n 
print(\"File: %s\" % (filename))\n f = open(filename)\n\n date = \"Unknown\"\n while date != \"\":\n date, time, cpuIndex, telaz, telel, tSys, tRx, tRms, tint, KperC, tSourcemax, velSource, dV, tVSum, tVSumRms, tSumKmSec, dTSumKmSec, gainFactor = gainfactor.readSaveValues( f)\n dlen = len(date)\n if dlen < 1:\n break\n if date[0] == \"#\":\n continue\n # else not a comment process the line\n count = count + 1\n isodate = \"20\"+date+\"T\"+time\n# print(\"DateTime: %s\" % (isodate))\n rs.utc = datetime.datetime.strptime(isodate,\"%Y-%m-%dT%H:%M:%S\")\n# print(\"Utc: %s\" % (rs.utc))\n rs.telaz = telaz\n rs.telel = telel\n rs.azel2radec()\n\n ra = rs.ra\n dec = rs.dec\n lon = rs.gallon\n lat = rs.gallat\n tsum = tSumKmSec\n tsdv = dTSumKmSec\n tmax = tSourcemax\n vave = tVSum\n vsdv = tVSumRms\n if firsttime == \"\":\n firsttime = date\n else:\n lasttime = date\n\n# if vave > -100. and vave < 100:\n# mygrid.convolve( lon, lat, vave, 1.)\n iGrid = gridIndex[cpuIndex]\n gainCorr = telescopefactors[iGrid]\n tsum = tsum * gainCorr\n if gridtype == 'RA':\n if doRatio:\n grids[iGrid].convolve(ra, dec, tsum, weight)\n gridall.convolve( ra, dec, tsum, weight)\n elif gridtype == '-RA':\n x = (ra*xsign) + xoffset\n if doRatio:\n grids[iGrid].convolve(x, dec, tsum, weight)\n gridall.convolve( x, dec, tsum, weight)\n elif gridtype == 'RA0':\n x = (ra*xsign) + xoffset\n if x < 0:\n x = x + xmax\n elif x > xmax:\n x = x - xmax\n if doRatio:\n grids[iGrid].convolve(x, dec, tsum, weight)\n gridall.convolve( x, dec, tsum, weight)\n else:\n if doRatio:\n grids[iGrid].convolve(lon, lat, tsum, weight)\n gridall.convolve( lon, lat, tsum, weight)\n\n if count == 0:\n print('Convolving Coordinates: ', ra, dec, lon, lat)\n print('Convolving Intensities: ', tsum, tsdv, vave, vsdv)\n print('Convolvign Parameters : ', n, time)\n count = count + 1\n # end reading all lines in save file\n f.close()\n\n # normalize each of the gridded images\n if doRatio:\n grids[0].normalize()\n grids[1].normalize()\n grids[2].normalize()\n grids[3].normalize()\n gridall.normalize()\n# mygrid.check()\n# zmin = -1000.\n# zmax = 3000.\n# limit grid intensities for plotting\n# mygrid.set_ij( 0, 0, zmax, 1.)\n# mygrid.set_ij( 1, 1, zmin, 1.)\n# mygrid.limit(zmin, zmax)\n\n subplots = False\n\n if subplots:\n fig, ax = plt.subplots(figsize=(myheight, mywidth), dpi=dpi)\n\n if gridtype == 'RA':\n cax = fig.add_axes([-180, 180], [-90, 90])\n else:\n cax = fig.add_axes([0, 24], [-90, 90])\n\n cbar = fig.colorbar(cax, ticks=[zmin, zmax], orientation='horizontal')\n cbar.ax.set_yticklabels([str(zmin), str(zmax)])\n\n ax.set_title(\"Citizen Science: Horn observations of our Galaxy\")\n else:\n#y_ticks = ymin + (ymax-ymin)*ticks/myheight\n\n ticks = np.arange(0, mywidth, 30*dpi)\n x_ticks = xmin + ((xmax-xmin)*ticks/mywidth)\n\n plt.imshow(gridall.image, interpolation='nearest', cmap=plt.get_cmap('jet'))\n\n if firsttime != lasttime:\n plt.title(\"Citizen Science: Observing our Galaxy: %s to %s\" % (firsttime, lasttime))\n else:\n plt.title(\"Citizen Science: Observing our Galaxy: %s\" % (firsttime))\n if gridtype == 'RA':\n plt.xlabel(\"Right Ascension (hours)\")\n plt.ylabel(\"Declination (degrees)\")\n labels = ticks/(mywidth/24)\n yticks = np.arange(0, myheight, 15*dpi)\n elif gridtype == '-RA':\n plt.xlabel(\"Right Ascension (hours)\")\n plt.ylabel(\"Declination (degrees)\")\n labels = 24 - (ticks/(mywidth/24))\n labels[0] = 0\n labels[0] = 24\n yticks = np.arange(0, myheight, 15*dpi)\n elif gridtype == '-EL':\n plt.xlabel(\"Right Ascension 
(hours)\")\n plt.ylabel(\"Elevation (degrees)\")\n labels = 24 - (ticks/(mywidth/24))\n labels[0] = 0\n labels[0] = 24\n yticks = np.arange(0, myheight, 15*dpi)\n elif gridtype == 'RA0': # put 0 hours in middle of plot\n plt.xlabel(\"Right Ascension (hours)\")\n plt.ylabel(\"Declination (degrees)\")\n labels = 12 - (ticks/(mywidth/24))\n nlabels = len(labels)\n for iii in range(nlabels):\n if labels[iii] < 0:\n labels[iii] = 24 + labels[iii]\n if labels[iii] == 24:\n labels[iii] = 0\n yticks = np.arange(0, myheight, 15*dpi)\n else:\n yticks = np.arange(0, myheight, 30*dpi)\n ticks = np.arange(0, mywidth, 30*dpi)\n x_ticks = xmin + (xmax-xmin)*ticks/mywidth\n labels = x_ticks\n plt.xlabel(\"Galactic Longitude (degrees)\")\n plt.ylabel(\"Galactic Latitude (degrees)\")\n # wnat an integer list of labels\n# slabels = str(labels)\n print(ticks, labels)\n y_ticks = ymax - (ymax-ymin)*yticks/myheight\n plt.yticks(yticks, y_ticks)\n plt.xticks(ticks, labels, rotation='horizontal')\n plt.colorbar()\n\n crval2 = (xmin + xmax)/2.\n crval1 = (ymin + ymax)/2.\n cdelt1 = (-1./float(dpi)) - .001\n cdelt2 = (1./float(dpi)) + .001\n if doRatio:\n# now show eacsh of the images\n for iGrid in range(4):\n imagetemp = copy.deepcopy(grids[iGrid].image)\n imagetemp2 = copy.deepcopy(grids[iGrid].image)\n kkk = myheight - 1\n for jjj in range(myheight):\n imagetemp[:][kkk] = imagetemp2[:][jjj]\n kkk = kkk - 1\n grids[iGrid].image = imagetemp\n writeFitsImage( rs, iGrid+2, grids[iGrid], projection)\n\n # put each telescope in a different grid\n ratio1 = copy.deepcopy(grid1)\n ratio2 = copy.deepcopy(grid1)\n ratio3 = copy.deepcopy(grid1)\n gratios = [ratio1, ratio2, ratio3]\n ratios = np.zeros(3)\n rmss = np.zeros(3)\n\n jGrid = 3\n for iGrid in range(3):\n print(\"Gain Ratios for Telescopes T%d and T%d\" % (iGrid+2, jGrid+2))\n ratio, rms, aratio = gridratio(grids[iGrid], grids[jGrid])\n ratios[iGrid] = ratio\n rmss[iGrid] = rms\n writeFitsImage( rs, iGrid+2, aratio, projection)\n \n writeFitsImage( rs, 0, gridall, projection)\n plt.show()",
"def run(self):\r\n\r\n self.roi_analyzer = ROIAnalyzer(\r\n ini_path=self.config_path,\r\n data_path=self.outlier_corrected_dir,\r\n calculate_distances=True,\r\n settings=self.settings,\r\n )\r\n self.roi_analyzer.files_found = self.files_found\r\n self.all_shape_names = self.roi_analyzer.shape_names\r\n self.roi_analyzer.run()\r\n self.roi_analyzer.compute_framewise_distance_to_roi_centroids()\r\n self.roi_distances_dict = self.roi_analyzer.roi_centroid_distance\r\n self.roi_entries_df = self.roi_analyzer.detailed_df\r\n if self.roi_directing_viable:\r\n self.directing_analyzer.run()\r\n self.roi_direction_df = self.directing_analyzer.results_df\r\n\r\n self.data = {}\r\n for file_cnt, file_path in enumerate(self.features_files):\r\n _, self.video_name, _ = get_fn_ext(file_path)\r\n _, _, self.fps = self.read_video_info(video_name=self.video_name)\r\n data_df = read_df(file_path, self.file_type)\r\n self.out_df = deepcopy(data_df)\r\n self.__process_within_rois()\r\n self.__distance_to_roi_centroids()\r\n if self.roi_directing_viable:\r\n self.__process_directionality()\r\n self.data[self.video_name] = self.out_df",
"def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')",
"def calibrate(science_list_fname, master_flat_fname, master_dark_fname, hp_map_fname, bp_map_fname, mask_bad_pixels = False,\n clean_Bad_Pix=True, replace_nans=True, background_fname = None, outdir = None):\n\n #Get the list of science frames\n #science_list = np.loadtxt(science_list_fname, dtype=str)\n science_list = science_list_fname\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the master flat\n master_flat_hdu = f.open(master_flat_fname)\n master_flat = master_flat_hdu[0].data\n print((\"Dividing each file by {}\".format(master_flat_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the bad pixel map from flat\n bp_map_hdu = f.open(bp_map_fname)\n bad_pixel_map = bp_map_hdu[0].data\n bad_pixel_map_bool = np.array(bad_pixel_map, dtype=bool)\n print((\"Using bad pixel map {}\".format(bp_map_fname)))\n\n #now if hot pixel map from dark is also given\n if hp_map_fname != None:\n hp_map_hdu = f.open(hp_map_fname)\n hot_pixel_map = hp_map_hdu[0].data\n bad_pixel_map_bool = np.logical_or(bad_pixel_map_bool, hot_pixel_map.astype(bool) )\n\n\n if background_fname != None:\n background_hdu = f.open(background_fname)\n background = background_hdu[0].data\n print(\"Subtracting background frame {} from all science files\".format(background_fname))\n\n\n for fname in science_list:\n #Open the file\n print((\"Calibrating {}\".format(fname\n )))\n hdu = f.open(fname)\n data = hdu[0].data\n science_exp_time = hdu[0].header['EXPTIME']\n\n if dark_exp_time != science_exp_time:\n warnings.warn(\"The master dark file doesn't have the same exposure time as the data. 
We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = science_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #Subtract the dark, divide by flat\n redux = ((data - factor*master_dark)/master_flat)\n #get rid of crazy values at bad pixel\n redux = redux*~bad_pixel_map_bool\n\n if background_fname != None:\n redux -= background\n\n if clean_Bad_Pix:\n # plt.plot(bad_pixel_map_bool)\n redux = cleanBadPix(redux, bad_pixel_map_bool)\n #redux = ccdproc.cosmicray_lacosmic(redux, sigclip=5)[0]\n\n # redux = ccdproc.cosmicray_median(redux, mbox=7, rbox=5, gbox=7)[0]\n\n #Mask the bad pixels if the flag is set\n if mask_bad_pixels:\n redux *= ~bad_pixel_map_bool\n\n if replace_nans:\n # nan_map = ~np.isfinite(redux)\n # redux = cleanBadPix(redux, nan_map)\n # plt.imshow(redux-after)\n nanmask = np.isnan(redux) #nan = True, just in case this is useful\n redux = np.nan_to_num(redux)\n\n #Put the cablibrated data back in the HDU list\n hdu[0].data = redux\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Subtracting {} from each flat file\".format(master_dark_fname)\n hdu[0].header['HISTORY'] = \"Dividing each file by {}\".format(master_flat_fname)\n\n if background_fname != None:\n hdu[0].header['HISTORY'] = \"Subtracted background frame {}\".format(background_fname)\n\n if mask_bad_pixels:\n hdu[0].header['HISTORY'] = \"Masking all bad pixels found in {}\".format(bp_map_fname)\n\n if clean_Bad_Pix:\n hdu[0].header['HISTORY'] = \"Cleaned all bad pixels found in {} using a median filter\".format(bp_map_fname)\n\n # #Append the bad pixel list to the HDU list\n # hdu.append(f.PrimaryHDU([bad_pixel_map]))\n # hdu[1].header['HISTORY'] = \"Appending bad pixel map :{}\".format(bp_map_fname)\n # hdu[1].header['HISTORY'] = \"0 = good pixel\"\n # hdu[1].header['HISTORY'] = \"1 = bad pixel from flat fields\"\n # hdu[1].header['HISTORY'] = \"2 = hot pixel from darks\"\n\n outname = fname.split('.')[0]+\"_calib.fits\"\n\n #if an output directory is specified we can write out to that directory instead\n #making sure to take only the stuff after the last '/' to avoid directory issues from fname\n if outdir:\n outname = outdir + fname.split('/')[-1]\n\n print((\"Writing calibrated file to {}\".format(outname)))\n #Save the calibrated file\n hdu.writeto(outname, overwrite=True)\n\n # f.PrimaryHDU(redux).writeto('redux_'+i, overwrite = True)",
"def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]\n sky_files = ['i200822_a018{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [917.75, 1033.5] # This is the target\n # Alternative star to try (bright star to bottom of target): [1015, 581.9]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=0, weight='strehl', submaps=3, instrument=osiris)\n\n ##########\n #\n # KB200101\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n # -- If you have more than one position angle, make sure to\n # clean them seperatly.\n # -- Strehl and Ref src should be the pixel coordinates of a bright\n # (but non saturated) source in the first exposure of sci_files.\n # -- If you use the OSIRIS image, you must include the full filename in the list. \n target = 'kb200101'\n sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]\n sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [975, 1006] # This is the target\n # Alternative star to try (bright star to right of target): [1158, 994]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=1, weight='strehl', submaps=3, instrument=osiris)",
"def example():\n blackhole = BlackHole()\n img_name = os.path.join('images', 'milkyway.jpg')\n blackhole.open(img_name, size=1000)\n blackhole.compute(Rs=8, D=50)\n blackhole.img2.save('example.jpg')",
"def run_main_test():\r\n\r\n print(\"\"\"\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n +++ Performing Main LZJD Full File Test +++\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n \"\"\")\r\n # iterate over the files in the directory\r\n for f in listdir(SRC):\r\n if isfile(join(SRC, f)):\r\n # prepare a dictionary with the digests ready to compare\r\n DIGESTS[f] = {'src': None, 'r2': None, 'ghidra': None}\r\n\r\n # calculate digest of src file\r\n DIGESTS[f]['src'] = digest(join(SRC, f))\r\n\r\n # name adjustment\r\n f2 = f.replace(\".c\", \".o\")\r\n\r\n # calculate digest of ghidra and r2 outputs\r\n DIGESTS[f]['ghidra'] = digest(join(GHIDRA_PATH, GHIDRA_NAME.format(f2)))\r\n DIGESTS[f]['r2'] = digest(join(R2DEC_PATH, R2DEC_NAME.format(f2)))\r\n\r\n # obtain the similarity from source\r\n SCORES[f] = {'ghidra': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['ghidra']),\r\n 'r2': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['r2']),\r\n 'x': get_lzjd_sim(DIGESTS[f]['ghidra'], DIGESTS[f]['r2'])}\r\n\r\n gidra_doms = 0\r\n for f in SCORES:\r\n print(\"{0:12}: Scores G:{1:20} R2:{2:20} X:{3:20} D:{4:20}\".format(f,\r\n SCORES[f]['ghidra'],\r\n SCORES[f]['r2'],\r\n SCORES[f]['x'],\r\n SCORES[f]['ghidra'] - SCORES[f]['r2']))\r\n if SCORES[f]['ghidra'] > SCORES[f]['r2']:\r\n gidra_doms += 1\r\n print(\"Ghidra Dominated on {} files\".format(gidra_doms))\r\n # This section of code prepares visualizations on the data for easy analysis\r\n plot_scatter(SCORES, title=\"LZJD Full File scores\")\r\n\r\n # obtian the scores as input data to the plots\r\n bxplt_data_gd = [score['ghidra'] for score in SCORES.values()]\r\n bxplt_data_r2 = [score['r2'] for score in SCORES.values()]\r\n\r\n # run pairwise t test\r\n print(\"Performing T-Test on LZJD Distance of files\")\r\n run_ttest(bxplt_data_gd, bxplt_data_r2)",
"def main(directory):\n # List sorted filenames under a directory.\n # Remove ending '/' if any.\n directory.rstrip('/')\n filenames = list_sorted_filenames(directory)\n print(len(filenames))\n\n target_circle_radius = 1000\n target_circle_radius_is_set = False\n\n cv.namedWindow('image', cv.WINDOW_NORMAL)\n\n for filename in filenames:\n # Read an image.\n img = cv.imread(directory + '/' + filename, cv.IMREAD_COLOR)\n grayscale_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n # Find the largest enclosing circle.\n circle_center, circle_radius = find_largest_enclosing_circle(grayscale_img)\n if circle_radius == 0:\n # No circle is found. Skip.\n continue\n cv.circle(img, circle_center, circle_radius, (0, 255, 0), 2)\n\n # Use the radius of the first image as the target radius.\n if not target_circle_radius_is_set:\n target_circle_radius = circle_radius\n target_circle_radius_is_set = True\n\n # Center the image to the circle center, and scale to the target circle radius.\n aligned_img = normalize_image(img, circle_center, circle_radius, target_circle_radius)\n\n cv.imshow('image', aligned_img)\n cv.waitKey(0)\n\n # Allow the user to interactively adjust the found circle.\n\n # Allow the user to reset the image transformation.\n\n # Allow the user to mark an image to be disgarded.",
"def run(self):\n #calculate platescale of first input image\n try:\n det = np.linalg.det(wcs.WCS(self.datain[0].header).wcs.cd)\n pscale = np.sqrt(np.abs(det))*3600.\n except:\n try:\n det = np.linalg.det(wcs.WCS(self.datain[0].header).wcs.pc)\n pscale = np.sqrt(np.abs(det))*3600.\n except:\n pscale = self.datain[0].header['PIXSCAL']\n #filtering out images which are too far away from the others\n #passing images added to a list of (image, WCS) tuples\n '''\n image_centers = []\n for f in self.datain:\n image_centers.append((f.header['CRVAL1'], f.header['CRVAL2']))\n filtered_datain = []\n dist_list = [[[0]*(len(image_centers)-1)]*len(image_centers)]\n for i in range(len(image_centers)):\n for j in range(len(image_centers)-1):\n dist_list[i][j+1] = np.sqrt((image_)**2+()**2)\n '''\n #calculations necessary for updating wcs information\n px = []\n py = []\n \n #in order to avoid NaN interactions, creating weight map\n weights=[]\n for f in self.datain:\n weights.append((np.where(np.isnan(f.image) == True, 0, 1)))\n \n for f in self.datain:\n px.extend(wcs.WCS(f.header).calc_footprint()[:,0])\n py.extend(wcs.WCS(f.header).calc_footprint()[:,1])\n x0 = (max(px)+min(px))/2.\n y0 = (max(py)+min(py))/2.\n sx = (max(px)-min(px))*np.cos(y0/180*np.pi) # arcsec\n sy = (max(py)-min(py)) # arcsec\n size = (sx*3600+self.getarg('pad')*2, sy*3600+self.getarg('pad')*2)\n xpix = size[0]//pscale\n ypix = size[1]//pscale\n cdelt = [pscale/3600.]*2\n \n #create self.dataout and give it a copy of an input's header\n self.dataout = DataFits(config = self.config)\n self.dataout.header = self.datain[0].header.copy()\n \n #update header wcs information\n self.log.info('Creating new WCS header')\n \n self.dataout.header['CRPIX1'] = xpix/2\n self.dataout.header['CRPIX2'] = ypix/2\n self.dataout.header['CRVAL1'] = x0\n self.dataout.header['CRVAL2'] = y0\n self.dataout.header['CD1_1'] = -cdelt[0]\n self.dataout.header['CD1_2'] = self.dataout.header['CD2_1'] = 0.\n self.dataout.header['CD2_2'] = cdelt[1]\n self.dataout.header['NAXIS1'] = int(xpix)\n self.dataout.header['NAXIS2'] = int(ypix)\n self.dataout.header['CTYPE1'] = 'RA---TAN-SIP'\n self.dataout.header['CTYPE2'] = 'DEC--TAN-SIP'\n self.dataout.header['RADESYS'] = 'ICRS'\n self.dataout.header['EQUINOX'] = 2000\n self.dataout.header['LATPOLE'] = self.datain[0].header['CRVAL2']\n self.dataout.header['LONPOLE'] = 180\n self.dataout.header['PIXASEC'] = pscale\n \n theta_rad = np.deg2rad(self.getarg('outangle'))\n rot_matrix = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n rot_cd = np.dot(rot_matrix, np.array([[self.dataout.header['CD1_1'], 0.],[0., self.dataout.header['CD2_2']]]))\n for i in [0,1]:\n for j in [0,1]:\n self.dataout.header['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n \n #check drizzle arguments\n if self.getarg('kernel') == 'smoothing':\n kernel = 'lanczos3'\n elif self.getarg('kernel') in ['square', 'point', 'gaussian', 'tophat']:\n kernel = self.getarg('kernel')\n else:\n self.log.error('Kernel name not recognized, using default')\n kernel = 'square'\n if self.getarg('drizzleweights') == 'uniform':\n driz_wt = ''\n elif self.getarg('drizzleweights') in ['exptime', 'expsq']:\n driz_wt = self.getarg('drizzleweights')\n else:\n self.log.error('Drizzle weighting not recognized, using default')\n driz_wt = ''\n \n #create drizzle object and add input images\n fullwcs = wcs.WCS(self.dataout.header)\n self.log.info('Starting drizzle')\n driz = drz.Drizzle(outwcs = fullwcs, 
pixfrac=self.getarg('pixfrac'), \\\n kernel=kernel, fillval='10000', wt_scl=driz_wt)\n for i,f in enumerate(self.datain):\n self.log.info('Adding %s to drizzle stack' % f.filename)\n driz.add_image(f.imgdata[0], wcs.WCS(f.header), inwht=weights[i])\n \n try:\n fillval=float(self.getarg('fillval'))\n except:\n fillval=np.nan\n self.log.error('Fillvalue not recognized or missing, using default')\n \n #creates output fits file from drizzle output\n self.dataout.imageset(np.where(driz.outsci == 10000, fillval, driz.outsci))\n self.dataout.imageset(driz.outwht,'OutWeight', self.dataout.header)\n self.dataout.filename = self.datain[0].filename\n\n #add history\n self.dataout.setheadval('HISTORY','Coadd: %d files combined with %s kernel, pixfrac %f at %f times resolution' \\\n % (len(self.datain), kernel, self.getarg('pixfrac'), self.getarg('resolution')))",
"def process_imgdir(self,imgdir):\n #Write images into resultdir\n resultdir = os.path.join(imgdir, 'results')\n #Read images from input dir\n inputdir = os.path.join(imgdir, 'inputs')\n shutil.rmtree(resultdir)\n os.mkdir(resultdir)\n #Read files from input images\n for fullname in os.listdir(inputdir):\n filepath = os.path.join(inputdir, fullname)\n if os.path.isfile(filepath):\n basename = os.path.basename(filepath)\n image = cv2.imread(filepath, cv2.IMREAD_COLOR)\n if len(image.shape) == 3 and image.shape[2] == 3:\n print('Processing %s ...' % basename)\n else:\n sys.stderr.write('Skipping %s, not RGB' % basename)\n continue\n #Extract haze from the scene and then save the image\n dehazed = self.get_scene_radiance(image)\n cv2.imwrite(os.path.join(resultdir, basename), dehazed)\n return os.path.join(resultdir, basename)",
"def palette_op(self, palette_size, sample_factor = 4):\n print(\"||||| Initiating Palette Fill Operation |||||\")\n fill_op = shmops.Fill_Operation(id='4321')\n\n tiles = self.slice_to_tiles(show_info=\"Image to Map\")\n\n #get palette to be used in the process\n if sample_factor == 1:\n palette = palette.generate(self.path, palette_size, debug=self.debug)\n else:\n #get combined palette by slicing map into sample tiles\n sampling_map_size = self.get_map_size(sample_factor)\n palette = self.get_combined_palette(palette_size, sampling_map_size)\n\n temp_path = Path('temp_img.png')\n x, y = 0,0\n for row in progress_bar.progress_bar(tiles, \"Processing Map: \", \" Row: \",36):\n for tile in row:\n #if self.debug:\n # temp_path = f'{x}x{y}y_temp_img.png'\n # temp_path = Path('./test_tiles/' + temp_path)\n tile.save(temp_path,\"PNG\")\n dominant = Haishoku.getDominant(str(temp_path))\n tile_color = palette.nearest_color(palette, dominant)\n #if self.debug: print(f'Tile Address: {x}, {y} | Tile Color: {tile_color} | Saved to: {temp_path}')\n fill_op.add_fill(x,y,palette.rgb_to_hex(*tile_color))\n x += 1\n y += 1\n x = 0\n if not self.debug: temp_path.unlink()\n\n return fill_op",
"def test_full_resize(self):\n number_of_pixels = 300\n destination = base_path +'/test_data/rendering_tests/resized_images/'\n source_folder = base_path + '/test_data/rendering_tests/filter_database/'\n\n\n for the_file in os.listdir(destination):\n file_path = os.path.join(destination, the_file)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n\n\n self.assertEqual(0, len(os.listdir(destination)))\n rb.find_all_files(number_of_pixels,source_folder, destination)\n self.assertEqual(6, len(os.listdir(destination)))\n for the_file in os.listdir(destination):\n file_path = os.path.join(destination,the_file)\n with Image.open(file_path) as f:\n self.assertNotEqual(number_of_pixels+5, f.size[0])\n self.assertNotEqual(number_of_pixels+5, f.size[1])\n # the above checks that the size does not vary as needed\n # probably not necessary\n self.assertEqual(number_of_pixels, f.size[0])\n self.assertEqual(number_of_pixels, f.size[1])",
"def calculate(self, fullFile):\r\n\r\n # Initialize variables\r\n pixelSize = 0\r\n areaFracBright = 0\r\n estSize= 0\r\n diameter_average_bright = 0\r\n diameter_SD_bright = 0\r\n diameter_num_bright = 0\r\n diameter_average_dark = 0\r\n diameter_SD_dark = 0\r\n diameter_num_dark = 0\r\n length_average_bright = 0\r\n length_SD_bright = 0\r\n length_num_bright = 0\r\n length_average_dark = 0\r\n length_SD_dark = 0\r\n length_num_dark = 0\r\n area_average_bright = 0\r\n area_SD_bright = 0\r\n area_num_bright = 0\r\n area_average_dark = 0\r\n area_SD_dark = 0\r\n area_num_dark = 0\r\n sumLength_average_bright = 0\r\n sumLength_SD_bright = 0\r\n sumLength_num_bright = 0\r\n sumLength_average_dark = 0\r\n sumLength_SD_dark = 0\r\n sumLength_num_dark = 0\r\n \r\n # Rename text boxes and tkinter variables for easier reference\r\n results = self.textResults\r\n status = self.textStatus\r\n showSteps = self.varShowSteps.get()\r\n outputExcel = self.varOutputExcel.get()\r\n savePDF = self.varSavePDF.get()\r\n\r\n # Handle strange GUI input.\r\n if self.varSegment.get() == \"binary\":\r\n self.varSaveBinary.set(False)\r\n\r\n # Select the image to operate on.\r\n if self.varSegment.get() == \"binary\":\r\n brightFile = selectFile(title=\"Select the bright phase mask\")\r\n darkFile = selectFile(title=\"Select the bright phase mask\")\r\n folder = '/'.join(brightFile.split('/')[:-1])\r\n fname = brightFile.split('/')[-1].split(' -')[:-1][0]\r\n ftype = brightFile.split('.')[-1]\r\n fnametype = fname + '.' + ftype\r\n pixelString = brightFile.split('--')[1] \r\n pixelSize = float(pixelString)\r\n fullFile = brightFile\r\n\r\n else:\r\n #fullFile = selectFile()\r\n # Figure out file name and type.\r\n folder = '/'.join(fullFile.split('/')[:-1])\r\n fname = fullFile.split('/')[-1].split('.')[:-1][0]\r\n ftype = fullFile.split('.')[-1]\r\n fnametype = fname + '.' 
+ ftype\r\n\r\n # Status update.\r\n if self.varSegment.get() == \"binary\":\r\n self.write(status, \"Operating on\", fnametype, \"(binary)\")\r\n self.write(results, \"Operating on\", fnametype, \"(binary)\")\r\n else:\r\n self.write(status, \"Operating on\", fnametype)\r\n self.write(results, \"Operating on\", fnametype)\r\n\r\n # Load image.\r\n img_raw = inout.load(fullFile)\r\n\r\n # Prepare for output of data to excel\r\n if outputExcel:\r\n if self.outFold == None:\r\n excelPath = folder+'/'+fname+' - All Measurements.xlsx'\r\n writer = pd.ExcelWriter(excelPath)\r\n else:\r\n excelPath = self.outFold+'/'+fname+' - All Measurements.xlsx'\r\n writer = pd.ExcelWriter(excelPath)\r\n\r\n # Prepare PDF file.\r\n pdf = None\r\n if savePDF:\r\n if self.outFold == None:\r\n pdf = PdfPages(folder+'/'+fname+' - Steps.pdf')\r\n else:\r\n pdf = PdfPages(self.outFold+'/'+fname+' - Steps.pdf')\r\n\r\n # Show steps.\r\n if showSteps:\r\n display.showFull(img_raw, title=\"Raw Image.\")\r\n\r\n if savePDF:\r\n inout.pdfSaveImage(pdf, img_raw, title=\"Raw Image.\")\r\n\r\n # Select the image data and scale bar.\r\n if self.varSegment.get() != \"binary\":\r\n\r\n if self.varAutoParse.get():\r\n profile = self.varProfile.get()\r\n img, pixelSize = autoSelect.autoDetect(img_raw, profile=profile)\r\n else:\r\n img, scale = manualSelectImage(img_raw)\r\n \r\n # Ensure proper dtype.\r\n img = inout.uint8(img) \r\n scale = inout.uint8(scale)\r\n\r\n # Get the pixel size.\r\n pixelSize = measure.manualPixelSize(scale)\r\n self.write(results, \"The pixel size is %.3f nm\" %(pixelSize))\r\n \r\n # Get a rough estimated of the ligament diameter.\r\n bright, dark = segment.roughSegment(img)\r\n estSizeD = measure.estimateDiameter(dark)\r\n estSizeB = measure.estimateDiameter(bright)\r\n estSizeB_rough = copy.copy(estSizeB)\r\n estSizeD_rough = copy.copy(estSizeD)\r\n # Get the average estimated size with a weight towards the thinner\r\n estSize = (min(estSizeD, estSizeB) + estSizeD + estSizeB) / 3\r\n # Result update \r\n self.write(results, \"Weighted average estimated diameter: \",\r\n round(estSize,3),\" pixels (\", round(estSize*pixelSize,3),\r\n \" nm).\", sep='')\r\n if savePDF:\r\n inout.pdfSaveImage(pdf, img, title=\"Selected image data\",\r\n cmap=plt.cm.gray)\r\n else:\r\n img = img_raw.copy()\r\n img = inout.uint8(img)\r\n\r\n # Segment the image.\r\n # Status update.\r\n if self.varSegment.get() == \"accurate\":\r\n self.write(status, \"Segmenting image. This may take a while,\",\r\n \"especially at higher resolutions.\")\r\n else:\r\n self.write(status, \"Segmenting image.\")\r\n # Segment based on options. 
\r\n if self.varSegment.get() == \"fast\":\r\n #already did this when estimating size\r\n pass\r\n elif self.varSegment.get() == \"manual\":\r\n bright, dark = segment.manualSegment(img)\r\n elif self.varSegment.get() == \"accurate\":\r\n bright, dark = segment.segment(img, estSize)\r\n elif self.varSegment.get() == \"binary\":\r\n bright = imread(brightFile)\r\n dark = imread(darkFile)\r\n estSizeD = measure.estimateDiameter(dark)\r\n estSizeB = measure.estimateDiameter(bright)\r\n estSizeB_rough = copy.copy(estSizeB)\r\n estSizeD_rough = copy.copy(estSizeD)\r\n # Get the average estimated size with a weight towards the thinner\r\n estSize = (min(estSizeD, estSizeB) + estSizeD + estSizeB) / 3\r\n # Result update \r\n self.write(results, \"Weighted average estimated diameter: \",\r\n round(estSize,3),\" pixels (\", round(estSize*pixelSize,3),\r\n \" nm).\", sep='')\r\n bright = inout.uint8(bright)\r\n dark = inout.uint8(dark)\r\n\r\n if showSteps and self.varSegment.get() != \"binary\":\r\n display.overlayMask(img, bright, title=\"Bright phase mask.\",\r\n animate=True)\r\n display.overlayMask(img, dark, title=\"Dark phase mask.\",\r\n animate=True)\r\n\r\n if savePDF and self.varSegment.get() != \"binary\":\r\n inout.pdfSaveOverlay(pdf, img, bright, title=\"Bright phase mask.\")\r\n inout.pdfSaveOverlay(pdf, img, dark, title=\"Dark phase mask.\")\r\n\r\n if self.varSaveMovie.get():\r\n stack = []\r\n for i in range(5):\r\n stack.append(gray2rgb(img))\r\n stack.append(display.overlayMask(img,bright,\r\n return_overlay=True))\r\n inout.save_movie(stack, ''.join((folder, '//', fname,\r\n ' - segment movie.mp4')))\r\n\r\n if self.varSaveBinary.get():\r\n inout.saveBinary(bright,\r\n folder+'/'+fname+' - Bright Segment--'+str(pixelSize)+'--.tif')\r\n inout.saveBinary(dark,\r\n folder+'/'+fname+' - Dark Segment--'+str(pixelSize)+'--.tif')\r\n\r\n \r\n \r\n # Calculate area fraction.\r\n if self.varAreaFraction.get():\r\n # Status update.\r\n self.write(status, \"Calculating area fraction.\")\r\n areaFracBright = np.count_nonzero(bright) / \\\r\n (np.count_nonzero(bright) + np.count_nonzero(dark))\r\n # Results update.\r\n self.write(results, \"Bright phase area fraction: %.3f\" \\\r\n %(areaFracBright))\r\n\r\n # Refine the estimated diameters.\r\n estSizeB, _ = measure.diameter(bright, estSize)\r\n estSizeD, _ = measure.diameter(dark, estSize)\r\n\r\n \r\n # Calculate the diameter.\r\n if self.varDiameter.get():\r\n # Status update.\r\n self.write(status, \"Calculating ligament diameter.\")\r\n # Calculate.\r\n diameter_average_bright, diameter_SD_bright, diameter_all_bright =\\\r\n measure.diameter(bright, estSizeB,\r\n showSteps=showSteps,\r\n returnAll=True,\r\n pdf=pdf)\r\n diameter_num_bright = len(diameter_all_bright)\r\n diameter_average_dark, diameter_SD_dark, diameter_all_dark =\\\r\n measure.diameter(dark, estSizeD,\r\n showSteps=showSteps,\r\n returnAll=True,\r\n pdf=pdf)\r\n diameter_num_dark = len(diameter_all_dark)\r\n # Update estimated ligament sizes.\r\n estSizeB = diameter_average_bright\r\n estSizeD = diameter_average_dark\r\n # Results update.\r\n self.write(results, \"Bright phase diameter:\",\r\n round(diameter_average_bright,2), \"±\",\r\n round(diameter_SD_bright), \"pixels.\")\r\n self.write(results, \"Dark phase diameter:\",\r\n round(diameter_average_dark,2), \"±\",\r\n round(diameter_SD_dark), \"pixels.\")\r\n\r\n if outputExcel:\r\n df = pd.DataFrame(diameter_all_bright)\r\n df.to_excel(writer, sheet_name=\"Bright phase diameter data\")\r\n df = 
pd.DataFrame(diameter_all_dark)\r\n df.to_excel(writer, sheet_name=\"Dark phase diameter data\") \r\n \r\n # Fix estSizeB and estSizeD if there is a problem\r\n estSizeB = estSizeB_rough if np.isnan(estSizeB) else estSizeB \r\n estSizeD = estSizeD_rough if np.isnan(estSizeD) else estSizeD \r\n \r\n # Calculate the ligament length.\r\n if self.varLength.get():\r\n # Status update.\r\n self.write(status, \"Calculating ligament length.\")\r\n length_average_bright, length_SD_bright, length_all_bright = \\\r\n measure.length(bright, estSizeB,\r\n showSteps=showSteps,\r\n returnAll=True,\r\n pdf=pdf)\r\n try:\r\n length_num_bright = len(length_all_bright)\r\n except TypeError:\r\n length_num_bright = 0\r\n length_average_dark, length_SD_dark, length_all_dark = \\\r\n measure.length(dark, estSizeD,\r\n showSteps=showSteps,\r\n returnAll=True,\r\n pdf=pdf)\r\n try:\r\n length_num_dark = len(length_all_dark)\r\n except TypeError:\r\n length_num_dark = 0\r\n # Results update.\r\n if length_average_bright != 0:\r\n self.write(results, \"Bright phase ligament length:\",\r\n round(length_average_bright,2), \"±\",\r\n round(length_SD_bright), \"pixels.\")\r\n if length_average_dark != 0:\r\n self.write(results, \"Dark phase ligament length:\",\r\n round(length_average_dark,2), \"±\",\r\n round(length_SD_dark), \"pixels.\")\r\n\r\n if outputExcel:\r\n if length_average_bright != 0:\r\n df = pd.DataFrame(length_all_bright)\r\n df.to_excel(writer, sheet_name=\"Bright phase length data\")\r\n if length_average_dark != 0:\r\n df = pd.DataFrame(length_all_dark)\r\n df.to_excel(writer, sheet_name=\"Dark phase length data\") \r\n\r\n # Calculate average object area.\r\n if self.varArea.get():\r\n # Status update.\r\n self.write(status, \"Calculating average object area.\")\r\n area_average_bright, area_SD_bright, area_all_bright = \\\r\n measure.area(bright, estSizeB,\r\n showSteps=showSteps,\r\n returnAll=True)\r\n try:\r\n area_num_bright = len(area_all_bright)\r\n except TypeError:\r\n area_num_bright = 0\r\n area_average_dark, area_SD_dark, area_all_dark = \\\r\n measure.area(dark, estSizeD,\r\n showSteps=showSteps,\r\n returnAll=True)\r\n try:\r\n area_num_dark = len(area_all_dark)\r\n except TypeError:\r\n area_num_dark = 0\r\n # Results update.\r\n if area_average_bright != 0:\r\n self.write(results, \"Bright phase average object area:\",\r\n round(area_average_bright,2), \"±\",\r\n round(area_SD_bright), \"pixels^2.\")\r\n if area_average_dark != 0:\r\n self.write(results, \"Dark phase average object area:\",\r\n round(area_average_dark,2), \"±\",\r\n round(area_SD_dark), \"pixels^2.\")\r\n\r\n if outputExcel:\r\n if area_average_bright != 0:\r\n df = pd.DataFrame(area_all_bright)\r\n df.to_excel(writer, sheet_name=\"Bright phase area data\")\r\n if area_average_dark != 0:\r\n df = pd.DataFrame(area_all_dark)\r\n df.to_excel(writer, sheet_name=\"Dark phase area data\") \r\n\r\n # Calculate the sum connected length of each object.\r\n if self.varSumConnectedLength.get():\r\n # Status update.\r\n self.write(status, \"Calculating sum connected ligament length.\")\r\n sumLength_average_bright, sumLength_SD_bright, \\\r\n sumLength_all_bright = \\\r\n measure.connectedLength(bright, estSizeB,\r\n showSteps=showSteps,\r\n returnAll=True,\r\n pdf=pdf)\r\n try:\r\n sumLength_num_bright = len(sumLength_all_bright)\r\n except TypeError:\r\n sumLength_num_bright = 0\r\n sumLength_average_dark, sumLength_SD_dark, sumLength_all_dark = \\\r\n measure.connectedLength(dark, estSizeD,\r\n showSteps=showSteps,\r\n 
returnAll=True,\r\n pdf=pdf)\r\n try:\r\n sumLength_num_dark = len(sumLength_all_dark)\r\n except TypeError:\r\n sumLength_num_dark = 0\r\n # Results update.\r\n if sumLength_average_bright != 0:\r\n self.write(results, \"Bright phase sum connected length:\",\r\n round(sumLength_average_bright,2), \"±\",\r\n round(sumLength_SD_bright), \"pixels.\")\r\n if sumLength_average_dark != 0:\r\n self.write(results, \"Dark phase sum connected length:\",\r\n round(sumLength_average_dark,2), \"±\",\r\n round(sumLength_SD_dark), \"pixels.\")\r\n\r\n if outputExcel:\r\n if sumLength_average_bright != 0:\r\n df = pd.DataFrame(sumLength_all_bright)\r\n df.to_excel(writer,\r\n sheet_name=\"Bright phase sum length data\")\r\n if sumLength_average_dark != 0:\r\n df = pd.DataFrame(sumLength_all_dark)\r\n df.to_excel(writer,\r\n sheet_name=\"Dark phase sum length data\") \r\n\r\n # Save excel file\r\n if outputExcel:\r\n writer.save()\r\n\r\n # Save the pdf.\r\n if savePDF:\r\n plt.title('Summary')\r\n plt.axis('off')\r\n fs = 10\r\n yloc = 0.95\r\n xloc = 0.01\r\n space = 0.05\r\n out = 'Image: ' + fnametype\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n out = 'Pixel size: ' + str(pixelSize)\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n out = 'Bright phase area fraction: ' + str(round(areaFracBright,3))\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n out = 'Rough diameter estimate: ' + str(round(estSize,3))+' pixels.'\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space*2\r\n\r\n out = 'Bright phase:'\r\n plt.text(xloc, yloc, out, fontsize=fs, fontweight='bold')\r\n yloc -= space\r\n if diameter_average_bright != 0:\r\n out = ('Ligament diameter: ' +\r\n str(round(diameter_average_bright*pixelSize,3)) +\r\n ' ± ' + str(round(diameter_SD_bright*pixelSize,3)) +\r\n ' nm')\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n if length_average_bright != 0:\r\n out = ('Ligament length: ' +\r\n str(round(length_average_bright*pixelSize,3)) +\r\n ' ± ' + str(round(length_SD_bright*pixelSize,3)) +\r\n ' nm')\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n if area_average_bright != 0:\r\n out = ('Object area: ' +\r\n str(round(area_average_bright*pixelSize**2,3)) +\r\n ' ± ' + str(round(area_SD_bright*pixelSize**2,3)) +\r\n r' nm$^2$')\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n if sumLength_average_bright != 0:\r\n out = ('Sum connected length: ' +\r\n str(round(sumLength_average_bright*pixelSize,3)) +\r\n ' ± ' + str(round(sumLength_SD_bright*pixelSize,3)) +\r\n ' nm')\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n yloc -= space\r\n\r\n out = 'Dark phase:'\r\n plt.text(xloc, yloc, out, fontsize=fs, fontweight='bold')\r\n yloc -= space\r\n if diameter_average_dark != 0:\r\n out = ('Ligament diameter: ' +\r\n str(round(diameter_average_dark*pixelSize,3)) +\r\n ' ± ' + str(round(diameter_SD_dark*pixelSize,3)) +\r\n ' nm')\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n if length_average_dark != 0:\r\n out = ('Ligament length: ' +\r\n str(round(length_average_dark*pixelSize,3)) +\r\n ' ± ' + str(round(length_SD_dark*pixelSize,3)) +\r\n ' nm')\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n if area_average_dark != 0:\r\n out = ('Object area: ' +\r\n str(round(area_average_dark*pixelSize**2,3)) +\r\n ' ± ' + str(round(area_SD_dark*pixelSize**2,3)) +\r\n r' nm$^2$')\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n if sumLength_average_dark != 
0:\r\n out = ('Sum connected length: ' +\r\n str(round(sumLength_average_dark*pixelSize,3)) +\r\n ' ± ' + str(round(sumLength_SD_dark*pixelSize,3)) +\r\n ' nm')\r\n plt.text(xloc, yloc, out, fontsize=fs)\r\n yloc -= space\r\n \r\n\r\n pdf.savefig()\r\n plt.close()\r\n pdf.close()\r\n\r\n # Append data to the saveAll\r\n newdata = np.array(\r\n [[fnametype,\r\n round(pixelSize,2),\r\n round(areaFracBright,3),\r\n round(estSize*pixelSize,3),\r\n round(diameter_average_bright*pixelSize,3),\r\n round(diameter_SD_bright*pixelSize,3),\r\n diameter_num_bright,\r\n round(diameter_average_dark*pixelSize,3),\r\n round(diameter_SD_dark*pixelSize,3),\r\n diameter_num_dark,\r\n round(length_average_bright*pixelSize,3),\r\n round(length_SD_bright*pixelSize,3),\r\n length_num_bright,\r\n round(length_average_dark*pixelSize,3),\r\n round(length_SD_dark*pixelSize,3),\r\n length_num_dark,\r\n round(area_average_bright*pixelSize**2,3),\r\n round(area_SD_bright*pixelSize**2,3),\r\n area_num_bright,\r\n round(area_average_dark*pixelSize**2,3),\r\n round(area_SD_dark*pixelSize**2,3),\r\n area_num_dark,\r\n round(sumLength_average_bright*pixelSize,3),\r\n round(sumLength_SD_bright*pixelSize,3),\r\n sumLength_num_bright,\r\n round(sumLength_average_dark*pixelSize,3),\r\n round(sumLength_SD_dark*pixelSize,3),\r\n sumLength_num_dark\r\n ]])\r\n \r\n\r\n \r\n self.saveAll = np.append(self.saveAll, newdata, axis=0)\r\n\r\n # Add new line to status and results\r\n self.write(results, '')\r\n self.write(status, '')",
"def arb_units(wb_run,sample_run,ei_guess,rebin,map_file,**kwargs):\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=funcreturns.lhs_info('both')\n #n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n #repopulate defualts\n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n #-------------DIAG------------------------\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if 
kwargs.has_key('diag_van_median_sigma'):\n vansig = kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #---------------END of DIAG--------------------\n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n print 'one2one selected'\n \n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file=map_file+'.map' \n reducer.map_file = map_file\n\n reducer.energy_bins = rebin\n \n if float(str.split(rebin,',')[2])>=float(ei_guess):\n print 'error rebin range exceeds ei'\n return\n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n if kwargs.has_key('hardmaskOnly'):\n totalmask = kwargs.get('hardmaskOnly')\n print 'Using hardmask from ', totalmask\n #next stable version can replace this with loadmask algoritum\n specs=diag_load_mask(totalmask)\n CloneWorkspace(InputWorkspace=sample_run,OutputWorkspace='mask_wksp')\n MaskDetectors(Workspace='mask_wksp',SpectraList=specs)\n masking=mtd['mask_wksp']\n else:\n \n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n reducer.spectra_masks=masking\n #fail_list=get_failed_spectra_list(masking)\n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(masking)\n \n print 'Diag found ', len(fail_list),'bad spectra'\n \n #Run the conversion\n deltaE_wkspace = reducer.convert_to_energy(sample_run, ei_guess, wb_run)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n \n ei= (deltaE_wkspace.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace,OutputWorkspace=results_name)\n \n print 'Incident energy found ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n \n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n \n return mtd[wksp_out]",
"def main():\n\n #Parse input arguments\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n\n parser.add_argument(\"-i\", \"--image\", dest=\"image\",\n help=\"specify the name of the image\", metavar=\"IMAGE\")\n\n args = parser.parse_args()\n\n #Load image\n if args.image is None:\n print(\"Please specify the name of image\")\n print(\"use the -h option to see usage information\")\n sys.exit(2)\n else:\n image_name = args.image.split(\".\")[0]\n input_image = cv2.imread(args.image, 0)\n\n\n bin_img = bi.binary_image()\n hist = bin_img.compute_histogram(input_image)\n\n outputDir = 'output/cellct/'\n outputDir_compress = 'output/Compression/'\n\n #Saving histogram to output directory \n hist_fig = plt.plot(hist)\n plt.savefig(outputDir+\"hist.png\")\n\n threshold = bin_img.find_optimal_threshold(hist)\n print(\"Optimal threshold: \", threshold)\n\n binary_img = bin_img.binarize(input_image)\n output_image_name = outputDir + \"binary_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, binary_img)\n\n #blobcoloring\n cell_count_obj = cc.cell_counting()\n\n regions = cell_count_obj.blob_coloring(binary_img)\n stats = cell_count_obj.compute_statistics(regions)\n\n cell_stats_img = cell_count_obj.mark_regions_image(binary_img, stats)\n output_image_name = outputDir + \"cell_stats_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, cell_stats_img)\n\t\n #Compression\n rle_obj = rle.rle()\n rle_code = rle_obj.encode_image(binary_img)\n print(\"-------------- Runlength Code -------------------\")\n print(rle_code)\n\n [height, width] = binary_img.shape\n\n decoded_image = rle_obj.decode_image(rle_code, height, width)\n\n output_image_name = outputDir_compress + \"decoded_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, decoded_image)",
"def main() -> None:\n try:\n input_image_path = argv[1]\n output_image_path = argv[2]\n\n color_spray( input_image_path, output_image_path )\n except IndexError:\n RuntimeError('Usage: INPUT_GRAY_IMAGE_PATH OUTPUT_RGB_IMAGE_PATH')\n return None"
]
| [
"0.67487633",
"0.63081914",
"0.62226206",
"0.59950316",
"0.57797056",
"0.5724643",
"0.5548937",
"0.5396003",
"0.53287005",
"0.53171885",
"0.5274112",
"0.5228013",
"0.52234906",
"0.52086335",
"0.5201472",
"0.5201053",
"0.5174415",
"0.5171564",
"0.51695555",
"0.51538444",
"0.51486605",
"0.51484054",
"0.5148395",
"0.5145963",
"0.51190144",
"0.5118445",
"0.5110906",
"0.51072323",
"0.5106969",
"0.51032764"
]
| 0.71805817 | 0 |
Run runcat on a single healpix pixel. All files will be placed in self.config.outpath (see self.__init__) | def run(self):
if not self.config.galfile_pixelized:
raise ValueError("Code only runs with pixelized galfile.")
self.config.check_files(check_zredfile=False, check_bkgfile=True, check_bkgfile_components=False, check_parfile=True, check_zlambdafile=True)
# Compute the border size
self.config.border = self.config.compute_border()
self.config.d.hpix = [self.pixel]
self.config.d.nside = self.nside
self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)
# Do the run
self.config.start_file_logging()
self.config.logger.info("Running runcat on pixel %d" % (self.pixel))
runcat = RunCatalog(self.config)
if not os.path.isfile(runcat.filename):
runcat.run(do_percolation_masking=self.config.runcat_percolation_masking)
runcat.output(savemembers=True, withversion=True)
self.config.stop_file_logging() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n\n # need to think about outpath\n\n # Make sure all files are here and okay...\n\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n self.config.logger.info(\"Running redMaPPer on pixel %d\" % (self.pixel))\n\n firstpass = RunFirstPass(self.config)\n\n if not os.path.isfile(firstpass.filename):\n firstpass.run()\n firstpass.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Firstpass file %s already present. Skipping...\" % (firstpass.filename))\n\n self.config.catfile = firstpass.filename\n\n # Clear out the firstpass memory\n del firstpass\n\n like = RunLikelihoods(self.config)\n\n if not os.path.isfile(like.filename):\n like.run()\n like.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Likelihood file %s already present. Skipping...\" % (like.filename))\n\n self.config.catfile = like.filename\n\n # Clear out the likelihood memory\n del like\n\n perc = RunPercolation(self.config)\n\n if not os.path.isfile(perc.filename):\n perc.run()\n perc.output(savemembers=True, withversion=False)\n else:\n self.config.logger.info(\"Percolation file %s already present. Skipping...\" % (perc.filename))\n\n self.config.stop_file_logging()",
"def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass",
"def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True,\n check_parfile=True, check_randfile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n self.config.start_file_logging()\n self.config.logger.info(\"Running zmask on pixel %d\" % (self.pixel))\n\n rand_zmask = RunRandomsZmask(self.config)\n\n if not os.path.isfile(rand_zmask.filename):\n rand_zmask.run()\n rand_zmask.output(savemembers=False, withversion=False)\n\n # All done\n self.config.stop_file_logging()",
"def _finalize_output(self, stackMean, stackUncert):\n # Select the type of output image to be built on the basis of the image\n # obsType.\n outImageClassDict = {\n 'BIAS': MasterBias,\n 'DARK': MasterDark,\n 'FLAT': MasterFlat,\n 'OBJECT': ReducedScience\n }\n outImageClass = outImageClassDict[self.imageList[0].obsType]\n\n # TODO: decide if it is a good idea to have an optional uncertainty...\n # # Properly handle the uncertainty provided\n # if stackUncert is not None:\n # outUncert = StdDevUncertainty(stackUncert)\n # else:\n # outUncert = None\n\n # Return that data to the user in a single AstroImage instance\n outImg = outImageClass(\n stackMean,\n uncertainty=StdDevUncertainty(stackUncert),\n header=self.imageList[0].header,\n properties={'unit': self.imageList[0].unit}\n )\n\n # Clean up any bad pixels in this image using the Inpointer class\n inpainter = Inpainter(outImg)\n outImg = inpainter.inpaint_nans()\n\n # If the output image is an ReducedScience and is not a supersky image,\n # then clear out the old astrometry and solve it anew!\n if (outImageClass is ReducedScience) and not self.is_supersky:\n # Clear out the old astrometry\n outImg.clear_astrometry()\n\n # Initalize an astrometry solver object\n astroSolver = AstrometrySolver(outImg)\n\n # Attempt to perform an astrometric solution\n temporaryImage, success = astroSolver.run()\n\n # If astrometry solution was successful, then replace the output\n if success: outImg = temporaryImage\n\n return outImg",
"def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n\n self.config.logger.info(\"Running zscan on pixel %d\" % (self.pixel))\n\n runzscan = RunZScan(self.config)\n if not os.path.isfile(runzscan.filename):\n runzscan.run()\n runzscan.output(savemembers=True, withversion=True)\n\n self.config.stop_file_logging()",
"def test_single_resize_er(self):\n to_resize = base_path + '/test_data/rendering_tests/just_resize/original/faulty.jpg'\n to_output = base_path + '/test_data/rendering_tests/just_resize/results/'\n\n for the_file in os.listdir(to_output):\n file_path = os.path.join(to_output, the_file)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n\n\n capturedOutput = io.StringIO() # Create StringIO object\n sys.stdout = capturedOutput # and redirect stdout.\n rb.resize_and_crop(to_resize, to_output+\"faulty.jpg\", 300,300 )\n sys.stdout = sys.__stdout__ # Reset redirect.\n self.assertEqual(\"Image too small to be resized\\n\",capturedOutput.getvalue()) # Now works as before.",
"def main():\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)",
"def unprocess(image):\n return image + MEAN_PIXEL",
"def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return",
"def test_rhostats_config():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(verbose=2)\n else:\n logger = piff.config.setup_logger(log_file='output/test_rhostats_config.log')\n\n image_file = os.path.join('output','test_stats_image.fits')\n cat_file = os.path.join('output','test_stats_cat.fits')\n psf_file = os.path.join('output','test_rhostats.fits')\n rho_file = os.path.join('output','test_rhostats.pdf')\n config = {\n 'input' : {\n 'image_file_name' : image_file,\n 'cat_file_name' : cat_file,\n 'stamp_size' : 48\n },\n 'psf' : {\n 'model' : { 'type' : 'Gaussian',\n 'fastfit': True,\n 'include_pixel': False },\n 'interp' : { 'type' : 'Mean' },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats' : { # Note: stats doesn't have to be a list.\n 'type': 'Rho',\n 'file_name': rho_file,\n 'min_sep': 30,\n 'max_sep': 600,\n 'sep_units': 'arcsec',\n 'bin_type': 'Linear',\n 'bin_size': 30,\n }\n },\n }\n piff.piffify(config, logger)\n assert os.path.isfile(rho_file)\n\n # repeat with plotify function\n os.remove(rho_file)\n piff.plotify(config, logger)\n assert os.path.isfile(rho_file)\n\n # Test rho statistics directly.\n min_sep = 1\n max_sep = 100\n bin_size = 0.1\n psf = piff.read(psf_file)\n orig_stars, wcs, pointing = piff.Input.process(config['input'], logger)\n stats = piff.RhoStats(min_sep=min_sep, max_sep=max_sep, bin_size=bin_size)\n with np.testing.assert_raises(RuntimeError):\n stats.write('dummy') # Cannot write before compute\n stats.compute(psf, orig_stars)\n\n rhos = [stats.rho1, stats.rho2, stats.rho3, stats.rho4, stats.rho5]\n for rho in rhos:\n # Test the range of separations\n radius = np.exp(rho.logr)\n np.testing.assert_array_less(radius, max_sep)\n np.testing.assert_array_less(min_sep, radius)\n # bin_size is reduced slightly to get integer number of bins\n assert rho.bin_size < bin_size\n assert np.isclose(rho.bin_size, bin_size, rtol=0.1)\n np.testing.assert_array_almost_equal(np.diff(rho.logr), rho.bin_size, decimal=5)\n\n # Test that the max absolute value of each rho isn't crazy\n np.testing.assert_array_less(np.abs(rho.xip), 1)\n\n # # Check that each rho isn't precisely zero. This means the sum of abs > 0\n np.testing.assert_array_less(0, np.sum(np.abs(rho.xip)))\n\n # Test using the piffify executable\n os.remove(rho_file)\n config['verbose'] = 0\n with open('rho.yaml','w') as f:\n f.write(yaml.dump(config, default_flow_style=False))\n piffify_exe = get_script_name('piffify')\n p = subprocess.Popen( [piffify_exe, 'rho.yaml'] )\n p.communicate()\n assert os.path.isfile(rho_file)\n\n # Test using the plotify executable\n os.remove(rho_file)\n plotify_exe = get_script_name('plotify')\n p = subprocess.Popen( [plotify_exe, 'rho.yaml'] )\n p.communicate()\n assert os.path.isfile(rho_file)\n\n # test running plotify with dir in config, with no logger, and with a modules specification.\n # (all to improve test coverage)\n config['output']['dir'] = '.'\n config['modules'] = [ 'custom_wcs' ]\n os.remove(rho_file)\n piff.plotify(config)\n assert os.path.isfile(rho_file)",
"def main():\n parser = argparse.ArgumentParser(\n description='Compute photometry.')\n parser.add_argument('filename', metavar='filename', nargs='+',\n help='Path to one or more input files to '\n 'modify in place.')\n parser.add_argument('-n', '--new', dest='overwrite',\n action='store_false', default=True,\n help='Set to write to _new.fits file instead '\n 'of overwriting the input.')\n parser.add_argument('-l', '--loglevel', dest='loglevel', type=str,\n action='store', default='INFO',\n help='Log level.')\n parser.add_argument('-z', '--fitsize', dest='fitsize', type=int,\n action='store', default=None,\n help='Fit subimage size (pix).')\n parser.add_argument('-s', '--srcpos', dest='srcpos', type=str,\n action='store', default=None,\n help='Estimated source position (x,y).')\n parser.add_argument('-f', '--fwhm', dest='fwhm', type=float,\n action='store', default=None,\n help='Estimated FWHM (pix).')\n parser.add_argument('-p', '--profile', dest='profile', type=str,\n action='store', default='moffat',\n help='Profile function (moffat, gaussian, '\n 'or lorentzian).')\n parser.add_argument('-r', '--aprad', dest='aprad', type=float,\n action='store', default=None,\n help='Aperture radius (pix).')\n parser.add_argument('-b', '--skyrad', dest='skyrad', type=str,\n action='store', default=None,\n help='Sky radii in pix (inner,outer).')\n parser.add_argument('-u', '--raw_units', dest='runits', type=str,\n action='store', default=None,\n help='Raw data units before calibration, '\n 'to use in header comments.')\n args = parser.parse_args()\n\n if args.srcpos is not None:\n try:\n srcpos = [float(x) for x in args.srcpos.split(',')]\n if len(srcpos) != 2:\n raise ValueError\n except ValueError:\n srcpos = None\n parser.error(\"Invalid srcpos argument.\")\n else:\n srcpos = None\n if args.skyrad is not None:\n try:\n skyrad = [float(x) for x in args.skyrad.split(',')]\n if len(skyrad) != 2:\n raise ValueError\n except ValueError:\n skyrad = None\n parser.error(\"Invalid skyrad argument.\")\n else:\n skyrad = None\n\n log.setLevel(args.loglevel.upper())\n for fname in args.filename:\n log.info('Running: {}'.format(fname))\n pipecal_applyphot(fname, srcpos=srcpos,\n fitsize=args.fitsize, fwhm=args.fwhm,\n profile=args.profile, aprad=args.aprad,\n skyrad=skyrad, runits=args.runits,\n overwrite=args.overwrite)\n log.info('')",
"def manipulations(path):\r\n\r\n print (\"\\n Working on %s\\n\" %(path))\r\n\r\n # Creates a folder with the results for the current image\r\n if not os.path.exists(\"Results\\\\%s\" %(path)):\r\n os.makedirs(\"Results\\\\%s\" %(path))\r\n\r\n # The variations made of the image\r\n func.pixelImage(path, 10, 10)\r\n func.animate(path)\r\n func.colorScale(path, 0)\r\n func.colorScale(path, 1)\r\n func.colorScale(path, 2)\r\n func.scan(path, 280)\r\n func.greyImage(path)\r\n func.colorSteps(path, 1)\r\n func.inverted(path)",
"def doTile(tile):\n global d, fmt, output, img, demag\n # get adjusted upper left coordinate for tile\n xstart,ystart=getCoords(tile)\n px = 256//demag\n tumor,blank=0,0\n for y in range(0,px):\n for x in range(0,px):\n curry,currx = y+ystart,x+xstart\n B,G,R = img.item(currx,curry,0),img.item(currx,curry,1),img.item(currx,curry,2)\n if B > 220 and G > 220 and R > 220:\n blank += 1\n if blank > (px**2)/2:\n print('removing %s' % tile)\n #os.remove(tile)\n return(1)\n if B < 70 and G > 180 and R < 70:\n tumor = 1\n print(\"%s tumor = %d\" % (tile,tumor))\n output.write(str(tumor)+',')\n \n blank = int(blank*2 > px**2)\n tumor = (tumor > 0)\n return(blank)",
"def masterflat(input_file):\n #set original directory\n original_path = os.getcwd()\n data_path = input_file['data_path']\n save_path = input_file['save_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n flat = glob.glob('flat*.fits')\n print 'Loading flat images \\nTotal of flat files = ',len(flat),'\\nFiles = \\n'\n print flat\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n os.system('cp flat*.fits '+save_path)\n #creating the names of flat with bias subctracted\n bflat = []\n for i in flat:\n bflat.append('B'+i)\n print '\\n Names os flat images with bias subtracted: \\n \\n',bflat\n #change for save_path directory\n os.chdir(save_path)\n #verify if previous superbias exist\n if os.path.isfile('superflat.fits') == True:\n os.system('rm superflat.fits')\n #verify if exits previous bflat*.fits files and remove then.\n for i in bflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n print '\\nCreating superflat .... \\n'\n #create the list of flat images and bflat images\n #flat = string.join(flat,',')\n #bflat = string.join(bflat,',')\n print '\\n Subtracting bias from flat images and creating bflat images.... \\n'\n #iraf.imarith()\n for i in range(len(flat)):\n iraf.imarith(flat[i],'-','superbias.fits',bflat[i])\n #print statistics from bflat*.fits images\n iraf.imstat(bflat[i])\n print '\\n .... done \\n'\n #clean previos flat*.fits files\n print '\\n Clean flat*.fits images .... \\n'\n os.system('rm flat*.fits')\n print '\\n .... done. \\n'\n #normalizing each flat\n print '\\nNormalizing each flat ....\\n'\n #checking if mean from numpy is the same from your bflat images using imstat\n #take the mean of each bflat image\n bflat_mean = np.zeros(len(bflat))\n for i in range(len(bflat)):\n image = fits.getdata(bflat[i])\n image = np.array(image,dtype='Float64')\n bflat_mean[i] = round(np.mean(image))\n image = 0 #clean image allocate to this variable\n print 'The mean of each bflat image, respectivaly ...'\n print bflat_mean\n #creating the names of bflat images after the normalization:\n abflat = []\n for i in bflat:\n abflat.append('A'+i)\n print '\\n Names os bflat images with bias subtracted and normalizad: \\n \\n',abflat\n #verify if exist previous ABflat*.fits images and remove then.\n for i in abflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n for i in range(len(abflat)):\n iraf.imarith(bflat[i],'/',bflat_mean[i],abflat[i])\n print '\\n.... done!\\n'\n # print '\\n Cleaning bflat*.fits images ....\\n'\n # os.system('rm Bflat*.fits')\n print '\\n.... done.\\n'\n print 'Statistics of the abflat*.fits images .... \\n'\n for i in range(len(abflat)):\n iraf.imstat(abflat[i])\n print '\\n Combining abflat images ....\\n'\n\n # ablist = string.join(abflat,',')\n # iraf.imcombine(ablist,'superflat.fits')\n #change how import flat files\n #usning the abflat list of flat files We will create a pandas python dataframe\n ablist = DataFrame(abflat)\n ablist.columns=['flat_files']\n ablist.to_csv('flat_list',index_label=False,index=False,header=False)\n #combine all flat images\n iraf.imcombine('@flat_list','superflat.fits')\n iraf.imstat('superflat.fits')\n print '\\n .... done. \\n'\n # print '\\nCleaning ABflat*.fits images ....\\n'\n # os.system('rm ABflat*.fits')\n print '\\n.... 
done!'\n #Verify if the image was created:\n output = glob.glob('superflat*.fits')\n if len(output) != 0:\n output = 0\n else:\n output = 1\n #Return to original directory\n os.chdir(original_path)\n #last mensage\n print '\\n MASTERFLAT.FITS created! \\n'\n print '\\n END of Data Reduction for create a masterflat.fits file. \\n'\n #obtain the value of return\n if output == 1:\n print '!!! ERROR/WARNING !!!'\n print 'Check if the superbias was created or if there is more than one superbias image.'\n return output",
"def read_single_image(image_entry, dir, offset_percent, output_size, normalize=True, rotate=True, preds=True):\r\n if preds:\r\n image_name = dir+'img_'+image_entry[0][1:-1]+'_'+str(image_entry[3])+'.jpg'\r\n if output_size[2] == 1:\r\n full_image = skio.imread(image_name, as_grey=True) # read in a greyscale\r\n else:\r\n full_image = skio.imread(image_name, as_grey=False)\r\n scaling = float(output_size[0])/float(image_entry[3])\r\n else:\r\n from skimage import transform as sktf\r\n image_name = dir + 'img_' + image_entry[0][1:-1] + '.jpg'\r\n if output_size[2] == 1:\r\n full_image = skio.imread(image_name, as_grey=True) # read in a greyscale\r\n else:\r\n full_image = skio.imread(image_name, as_grey=False)\r\n # scale and downsample the image here to reduce computation\r\n o_size = np.shape(full_image)\r\n scaling = float(output_size[0])/float(image_entry[3])\r\n o_shape = [int(scaling*o_size[0]), int(scaling*o_size[1])]\r\n full_image = sktf.resize(full_image, o_shape)\r\n image_size = np.shape(full_image)\r\n if normalize:\r\n full_image = image_normalize(full_image, image_size) # normalizes the image that was read in\r\n else:\r\n full_image = full_image\r\n # compute random center offsets\r\n cent_x = float(image_entry[1]) + float(image_entry[3])*rd.uniform(-1.0*float(offset_percent), float(offset_percent))\r\n cent_y = float(image_entry[2]) + float(image_entry[3])*rd.uniform(-1.0*float(offset_percent), float(offset_percent))\r\n # compute a corner of the image cutout to use as starting point for making matrix of cutout coordinates\r\n left_x = scaling*(cent_x - 0.5 * float(image_entry[3]))\r\n top_y = scaling*(cent_y - 0.5 * float(image_entry[3]))\r\n pixel_locations_x = np.zeros(output_size[0:2]) # create a 2D array to hold all the pixel locations of the cutout\r\n pixel_locations_y = np.zeros(output_size[0:2])\r\n for i in range(output_size[0]): # leverage the fact that along an axis, x/y locations are identical\r\n pixel_locations_x[:, i] = left_x + i*1.0\r\n pixel_locations_y[i, :] = top_y + i*1.0\r\n # ravel them to make easier to process\r\n pixel_locations_x = np.ravel(pixel_locations_x)\r\n pixel_locations_y = np.ravel(pixel_locations_y)\r\n if rotate:\r\n angle = rd.uniform(0.0, 6.284) # select a random rotation angle\r\n sina = math.sin(angle)\r\n cosa = math.cos(angle)\r\n rotmat = np.array([[cosa, -1.0 * sina], [sina, cosa]])\r\n # the rotation should occur about the center of the image cutout location, so translate the origin:\r\n rel_loc_x = [i - scaling*cent_x for i in pixel_locations_x]\r\n rel_loc_y = [i - scaling*cent_y for i in pixel_locations_y]\r\n # rotate the corners now\r\n for i in range(len(pixel_locations_x)):\r\n rotated_coord = np.matmul(rotmat, np.array([[rel_loc_x[i]], [rel_loc_y[i]]]))\r\n pixel_locations_x[i] = rotated_coord[0, 0] + scaling*cent_x\r\n pixel_locations_y[i] = rotated_coord[1, 0] + scaling*cent_y\r\n # now go ahead and use the rotated (or unrotated, if rotate=false) corners to actually extract the image cutout\r\n # first round corners to be the nearest integer\r\n pixel_locations_x = np.array([int(i) for i in pixel_locations_x])\r\n pixel_locations_y = np.array([int(i) for i in pixel_locations_y])\r\n # if the computed pixel locations are outside the bounds of the image, pad the image with black\r\n if (np.min(pixel_locations_x)<=0 or np.min(pixel_locations_y) <= 0\r\n or np.max(pixel_locations_x) >= image_size[1] or np.max(pixel_locations_y) >= image_size[0]):\r\n full_image, pixel_locations_x, pixel_locations_y = 
image_pad(full_image, pixel_locations_x, pixel_locations_y)\r\n \"\"\"debug\r\n print('x_cent '+str(scaling*cent_x))\r\n print('y_cent '+str(scaling*cent_y))\r\n viewer = ImageViewer(full_image)\r\n viewer.show()\r\n \"\"\"\r\n output_image = np.ravel(full_image[pixel_locations_y, pixel_locations_x])\r\n \"\"\"\r\n output_image = np.reshape(full_image[pixel_locations_y, pixel_locations_x], output_size)\r\n viewer2 = ImageViewer(output_image)\r\n viewer2.show()\r\n \"\"\"\r\n return output_image",
"def extraction(userinputs):\n #Set up required variables\n target_dir = userinputs['OUTDIR']\n seximage = userinputs['IMAGE']\n logging.info('Running sextractor on {}'.format(userinputs['IMAGE']))\n\n print 'Executing SExtractor on user selected image : ', seximage\n\n # Verify that file exists\n if os.path.exists(userinputs['DATA'] + '/' + seximage) == False:\n print 'File ' + seximage + ' could not be found in ' + userinputs['DATA']\n logging.critical(' Could not find {}. Quitting'.format(seximage))\n logging.debug('Looking for {} but unable to locate'.format(userinputs['DATA'] + '/' + seximage))\n filemanagement.shutdown('Quitting now...',userinputs)\n\n # Run sextractor\n logging.info('Start sextractor')\n os.chdir(target_dir + '/s_extraction')\n logging.debug('Changed dir to {}'.format(os.getcwd()))\n command = 'sex ' + userinputs['DATA'] + '/' + seximage + '[1] -c R2_wl_aa.config'\n os.system(command)\n os.chdir(target_dir)\n logging.debug('Changed working directory back to {}'.format(target_dir))\n\n # Read in results and make regions file of objects sextracted\n logging.info('Read in Sextractor catalog')\n xx, yy = np.loadtxt(target_dir + '/s_extraction/R2_wl_dpop_detarea.cat', unpack=True,\n skiprows=5, usecols=(0,1))\n\n outputfile = target_dir + '/s_extraction/catalog_ds9_sextractor.reg'\n\n logging.info('Writing region file from source extractor data')\n logging.debug('Sextractor file: {}'.format(target_dir + '/s_extraction/R2_wl_dpop_detarea.cat'))\n with open(outputfile, 'w') as file:\n file.write('global color=blue width=5 font=\"helvetica 15 normal roman\" highlite=1 \\n')\n file.write('image\\n')\n\n for i in range(len(xx)):\n newline = 'circle(' + str(xx[i]) + ',' + str(yy[i]) + ',7) \\n'\n file.write(newline)\n print ''\n print 'Check catalog_ds9_sextractor.reg in the /s_extraction directory for'\n print 'the quality of source extraction.'\n print ''\n\n return target_dir + '/s_extraction/R2_wl_dpop_detarea.cat'",
"def update_png_crusher():\n if os.path.exists(PNG_CRUSHER):\n return\n\n for path in glob.glob(os.path.join(libdot.BIN_DIR, '.png.crusher.*')):\n os.unlink(path)\n\n r = requests.get(PNG_CRUSHER_URL + '?format=TEXT')\n with open(PNG_CRUSHER, 'wb') as fp:\n fp.write(base64.b64decode(r.text))\n\n os.chmod(PNG_CRUSHER, 0o755)",
"def run(self):\n generated_gif = self.generate()\n with open(self.out_filename, 'wb') as out_fd:\n out_fd.write(generated_gif)",
"def exercise():\n\n #\n # Convert Lena Tiff image to raw format\n #\n for f in glob.glob('*.jpg'):\n os.remove(f)\n \n for f in glob.glob('*.dat'):\n os.remove(f)\n \n input_raw_file = convert_to_raw('Lena.tiff')\n\n for device in ['cpu', 'gpu']:\n for interp in ['nn', 'bl']:\n for (w,h) in ((256, 300), (486, 486),(2000, 1000),(1000, 2000),(8000, 4000)):\n (t, f) = interpolate(input_raw_file, device + '_' + interp + '_lena.dat', device, 0, interp, w, h)\n convert_to_jpg(f)\n\n \n for f in glob.glob('*.dat'):\n convert_to_jpg(f)\n os.remove(f)\n \n quit()",
"def animorf (path, res, method=\"cleantests\", **kwargs):\n MoDirt=kwargs.get('MoDirt', 'Mo')\n Mask=kwargs.get('Mask', 0)\n genPoster=kwargs.get('genPoster', False)\n compareToStds = kwargs.get('compToStds',False)\n verbose = kwargs.get('verbose',False)\n autoMask = kwargs.get('autoMaskEdges',False)\n stdDir = kwargs.get('stdDir', 'standards/')\n \n # Standardize MoDirt to 'mo' or 'dirt' using checkMoDirt\n MoDirt = fun.checkMoDirt(MoDirt)\n \n filetypes = ['.tif', '.jpg', '.jpeg','.tiff']\n \n # Standardize the path string, and extract the name of the folder or image \n # file depending on whether the path is the path to a directory or image. \n if os.path.isdir(path):\n if path.endswith('/'):\n path = path[:-1]\n name = os.path.split(path)[1]\n elif os.path.splitext(path)[1] in filetypes:\n name = os.path.splitext(os.path.split(path)[1])[0]\n elif type(path)!=str: \n raise Exception(\"Path must be a string: %s\" % str(path))\n else: \n raise Exception(\"Invalid path name: %s\" % path)\n \n # Generate output folders\n outFolder = \"Output/Output_\"+name+'_'+method\n \n if genPoster: posterFolder = outFolder+'/PosterMaps/'\n \n if MoDirt == 'mo':\n mapFolder = os.path.join(outFolder,'PtMaps/')\n else:\n mapFolder = os.path.join(outFolder,'DirtMaps/')\n \n if not os.path.exists(mapFolder): os.makedirs(mapFolder)\n if not os.path.exists(mapFolder): os.makedirs(mapFolder)\n if genPoster and not os.path.exists(posterFolder): os.makedirs(posterFolder)\n \n \"\"\"Create Data Dictionary\"\"\"\n # Iterate through the images within a folder if the path is to a directory, \n # and run analyzeImg on each of image, then write the results to the Data \n # Dictionary. \n Data = {}\n \n # OPERATE ON FOLDER OF IMAGES ==============================================\n if os.path.isdir(path):\n \n # Get list of images in directory\n images = [f for f in os.listdir(path) if os.path.splitext(f)[1] in filetypes]\n # Create paths to those images\n imgPaths = [os.path.join(path,f) for f in images]\n imgPaths.sort()\n if Mask!=0:\n assert type(Mask)==str, \"\"\"\n 'Mask' kwarg must be a path to a directory\n if the 'path' variable is a path to a directory.\"\n \"\"\"\n assert os.path.isdir(Mask), \"\"\"\n 'Mask' kwarg must be a path to a directory\n if the 'path' variable is a path to a directory.\n \"\"\"\n # Get list of images in directory\n masks = [m for m in os.listdir(Mask) if os.path.splitext(m)[1] in filetypes]\n # Create paths to those images\n maskPaths = [os.path.join(Mask,m) for m in masks]\n # I am assuming the mask name will be the same as the corresponding \n # name in the image folder, so when both are sorted, they should match. 
\n maskPaths.sort() \n \n else:\n maskPaths = [0 for f in imgPaths]\n \n for i in range(len(images)):\n # Make the mask image from the mask path\n if Mask!=0: mask = fun.loadImg(maskPaths[i])\n else: mask=0\n # run analysis on the image\n statsDict, picts = analyzeImage(imgPaths[i], res, \n method=method, MoDirt=MoDirt, \n Mask=mask,autoMaskEdges=autoMask,\n stdDir=stdDir, verbose=verbose)\n imgName = os.path.splitext(images[i])[0]\n # Assign to Data Dictionary\n Data[imgName] = statsDict\n (threshed,\n poster) = picts\n threshed = threshed.astype(np.uint8)\n threshed[threshed!=0]=255\n poster = poster.astype(np.uint8)\n \n # Create the output images\n cv2.imwrite(mapFolder+imgName+'.png',\n threshed, [cv2.cv.CV_IMWRITE_PNG_COMPRESSION,6])\n if genPoster:\n cv2.imwrite(posterFolder+imgName+'.png',\n poster, [cv2.cv.CV_IMWRITE_PNG_COMPRESSION,6])\n \n # OPERATE ON A SINGLE IMAGE ================================================\n else:\n # run analysis on the image\n statsDict, picts = analyzeImage(path, res, \n method=method, MoDirt=MoDirt, \n Mask=Mask,autoMaskEdges=autoMask,\n stdDir=stdDir, verbose=verbose)\n Data[name] = statsDict\n (threshed,\n poster) = picts\n threshed = threshed.astype(np.uint8)\n threshed[threshed!=0]=255\n poster = poster.astype(np.uint8)\n poster[poster!=0]=255\n # Create the output images\n cv2.imwrite(mapFolder+name+'.png',\n threshed, [cv2.cv.CV_IMWRITE_PNG_COMPRESSION,6])\n if genPoster:\n cv2.imwrite(posterFolder+name+'.png',\n poster, [cv2.cv.CV_IMWRITE_PNG_COMPRESSION,6])\n \n \"\"\"Write the output to a CSV file\"\"\"\n filePath = os.path.join(outFolder,MoDirt.capitalize()+'_ouput_'+name+'.csv')\n CSV = gencsv.DataToCSV(filePath, name) \n CSV.writeDataFromDict(Data,FirstColHead='Image')\n CSV.closeCSVFile()",
"def run():\n renanme_action()\n\n write_anim()\n alc.save_file()",
"def main(frac: float):\n\n clean(\n create_sample(RAW_DATA_FILEPATH, \"data/interim\", frac),\n \"data/processed\",\n geojson=True,\n )",
"def main(config):\n file_paths_info = [('GLOFRIS','WATCH','ARG_inunriver_historical_000000000WATCH_1980_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP45','ARG_inunriver_rcp4p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('GLOFRIS','RCP85','ARG_inunriver_rcp8p5_0000GFDL-ESM2M_2030_rp01000.tif'),\n \t\t\t\t('FATHOM','AR_fluvial_undefended_merged','AR-FU-1000.tif'),\n \t\t\t\t('FATHOM','AR_pluvial_undefended_merged','AR-PU-1000.tif')\n \t\t\t\t]\n figure_names = ['GLOFRIS-WATCH-fluvial','GLOFRIS-RCP45-fluvial','GLOFRIS-RCP85-fluvial','FATHOM-fluvial','FATHOM-pluvial']\n figure_titles = ['current fluvial flooding','RCP4.5 fluvial flooding','RCP8.5 fluvial flooding','current fluvial flooding','current pluvial flooding']\n for f_i in range(len(file_paths_info)):\n\t hazard_file = os.path.join(config['paths']['data'],'flood_data', file_paths_info[f_i][0],file_paths_info[f_i][1],file_paths_info[f_i][2])\n\t output_file = os.path.join(config['paths']['figures'], 'flood-map-{}.png'.format(figure_names[f_i]))\n\t ax = get_axes()\n\t plot_basemap(ax, config['paths']['data'])\n\t scale_bar(ax, location=(0.8, 0.05))\n\t plot_basemap_labels(ax, config['paths']['data'], include_regions=True,include_zorder=3)\n\n\t proj_lat_lon = ccrs.PlateCarree()\n\n\n\t # Create color map\n\t colors = plt.get_cmap('Blues')\n\n\t # Read in raster data\n\t data, lat_lon_extent = get_data(hazard_file)\n\t data[(data <= 0) | (data > 5)] = np.nan\n\t max_val = np.nanmax(data)\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val)\n\n\t # Plot population data\n\t im = ax.imshow(data, extent=lat_lon_extent,transform=proj_lat_lon, cmap=colors,norm =norm, zorder=2)\n\n\t # Add colorbar\n\t cbar = plt.colorbar(im, ax=ax,fraction=0.1, shrink=0.87,pad=0.01, drawedges=False, orientation='horizontal',\n\t norm=mpl.colors.Normalize(vmin=0, vmax=max_val), ticks=list(np.linspace(0,max_val,3)))\n\t cbar.set_clim(vmin=0,vmax=max_val)\n\n\n\t cbar.outline.set_color(\"none\")\n\t cbar.ax.yaxis.set_tick_params(color='black')\n\t cbar.ax.set_xlabel('Flood depths (m)',fontsize=12,color='black')\n\n\t plt.title('1 in 1000 year {}'.format(figure_titles[f_i]), fontsize = 14)\n\t save_fig(output_file)\n\t plt.close()",
"def do_stuff(self):\n self.create_tourism_raster()",
"def main():\r\n original = SimpleImage(\"images/poppy.png\")\r\n original.show()\r\n # shrink function\r\n after_shrink = shrink('images/poppy.png')\r\n after_shrink.show()",
"def run(self):\n visualize_hydro_radial(self.config, self.logger)",
"def make(\n self,\n sample=10,\n scale=1,\n percentage=0,\n filename_addition=\"_halftoned\",\n angles=[0, 15, 30, 45],\n style=\"color\",\n antialias=False,\n output_format=\"default\",\n output_quality=75,\n save_channels=False,\n save_channels_format=\"default\",\n save_channels_style=\"color\",\n ):\n\n self.check_arguments(\n angles=angles,\n antialias=antialias,\n output_format=output_format,\n output_quality=output_quality,\n percentage=percentage,\n sample=sample,\n save_channels=save_channels,\n save_channels_format=save_channels_format,\n save_channels_style=save_channels_style,\n scale=scale,\n style=style,\n )\n\n f, extension = os.path.splitext(self.path)\n\n if output_format == \"jpeg\":\n extension = \".jpg\"\n elif output_format.startswith(\"png\"):\n extension = \".png\"\n # Else, keep the same as the input file.\n\n output_filename = \"%s%s%s\" % (f, str(filename_addition), extension)\n\n try:\n im = Image.open(self.path)\n except IOError as e:\n raise Exception(\"Couldn't open source file '%s'\" % (self.path)) from e\n\n if style == \"grayscale\":\n angles = angles[:1]\n gray_im = im.convert(\"L\")\n channel_images = self.halftone(\n im, gray_im, sample, scale, angles, antialias\n )\n new = channel_images[0]\n\n else:\n cmyk = self.gcr(im, percentage)\n channel_images = self.halftone(im, cmyk, sample, scale, angles, antialias)\n\n if save_channels:\n\n self.save_channel_images(\n channel_images,\n channels_style=save_channels_style,\n channels_format=save_channels_format,\n output_filename=output_filename,\n output_quality=output_quality,\n )\n\n new = Image.merge(\"CMYK\", channel_images)\n\n if extension == \".jpg\":\n new.save(output_filename, \"JPEG\", subsampling=0, quality=output_quality)\n elif extension == \".png\":\n new.convert(\"RGB\").save(output_filename, \"PNG\")",
"def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], 
phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)",
"def teardown():\n os.remove('green-dot.tif')\n os.remove('green-dot.jpg')\n os.remove('green-dot.png')",
"def on_run(self):\n self.set_illumination({'mode': 'breathe'})"
]
| [
"0.6371441",
"0.5963486",
"0.5613234",
"0.555862",
"0.5553763",
"0.5502191",
"0.5487119",
"0.5415223",
"0.538835",
"0.5333105",
"0.5228279",
"0.52231663",
"0.5197447",
"0.5101565",
"0.5088998",
"0.50669104",
"0.5051589",
"0.50342584",
"0.5012079",
"0.49999022",
"0.49969113",
"0.4966514",
"0.49617356",
"0.49482942",
"0.4939517",
"0.4939343",
"0.49384877",
"0.4929532",
"0.492156",
"0.49183905"
]
| 0.78551203 | 0 |
Run zmask on a single healpix pixel. This method will check if files already exist, and will skip any steps that already exist. The border radius will automatically be calculated based on the richest possible cluster at the lowest possible redshift. All files will be placed in self.config.outpath (see self.__init__) | def run(self):
if not self.config.galfile_pixelized:
raise ValueError("Code only runs with pixelized galfile.")
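        # Verify the required inputs are in place: background, parameter, and random files
        # (a zred file is explicitly not required for this step)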
self.config.check_files(check_zredfile=False, check_bkgfile=True,
check_parfile=True, check_randfile=True)
# Compute the border size
self.config.border = self.config.compute_border()
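        # Restrict the run to the requested healpix pixel/nside and build a unique output base name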
self.config.d.hpix = [self.pixel]
self.config.d.nside = self.nside
self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)
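        # Send log messages for this pixel to its own log file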
self.config.start_file_logging()
self.config.logger.info("Running zmask on pixel %d" % (self.pixel))
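        # Only run the randoms zmask calculation if its output file is not already on disk, then write it out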
rand_zmask = RunRandomsZmask(self.config)
if not os.path.isfile(rand_zmask.filename):
rand_zmask.run()
rand_zmask.output(savemembers=False, withversion=False)
# All done
self.config.stop_file_logging() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n\n self.config.logger.info(\"Running zscan on pixel %d\" % (self.pixel))\n\n runzscan = RunZScan(self.config)\n if not os.path.isfile(runzscan.filename):\n runzscan.run()\n runzscan.output(savemembers=True, withversion=True)\n\n self.config.stop_file_logging()",
"def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True, check_bkgfile_components=False, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n\n self.config.logger.info(\"Running runcat on pixel %d\" % (self.pixel))\n\n runcat = RunCatalog(self.config)\n if not os.path.isfile(runcat.filename):\n runcat.run(do_percolation_masking=self.config.runcat_percolation_masking)\n runcat.output(savemembers=True, withversion=True)\n\n self.config.stop_file_logging()",
"def run(self):\n\n # need to think about outpath\n\n # Make sure all files are here and okay...\n\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n self.config.logger.info(\"Running redMaPPer on pixel %d\" % (self.pixel))\n\n firstpass = RunFirstPass(self.config)\n\n if not os.path.isfile(firstpass.filename):\n firstpass.run()\n firstpass.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Firstpass file %s already present. Skipping...\" % (firstpass.filename))\n\n self.config.catfile = firstpass.filename\n\n # Clear out the firstpass memory\n del firstpass\n\n like = RunLikelihoods(self.config)\n\n if not os.path.isfile(like.filename):\n like.run()\n like.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Likelihood file %s already present. Skipping...\" % (like.filename))\n\n self.config.catfile = like.filename\n\n # Clear out the likelihood memory\n del like\n\n perc = RunPercolation(self.config)\n\n if not os.path.isfile(perc.filename):\n perc.run()\n perc.output(savemembers=True, withversion=False)\n else:\n self.config.logger.info(\"Percolation file %s already present. Skipping...\" % (perc.filename))\n\n self.config.stop_file_logging()",
"def analyze_images_in_folder(self, folder, generate_zmax = False, show_result = True, save_mask = True, save_excel = True):\r\n flat_cell_counted_in_folder = 0 \r\n total_cells_counted_in_folder = 0\r\n \r\n # If need to do zmax projection first\r\n if generate_zmax == True:\r\n ProcessImage.cam_screening_post_processing(folder)\r\n # Here a new folder for maxProjection is generated inside, change the path\r\n folder = os.path.join(folder, 'maxProjection')\r\n \r\n # If background images are taken\r\n if os.path.exists(os.path.join(folder, 'background')):\r\n # If the background image is taken to substract out\r\n background_substraction = True\r\n \r\n # Get all the background files names\r\n background_fileNameList = []\r\n for file in os.listdir(os.path.join(folder, 'background')):\r\n if \"tif\" in file: \r\n background_fileNameList.append(os.path.join(folder, 'background', file))\r\n \r\n background_image = ProcessImage.image_stack_calculation(background_fileNameList, operation = \"mean\")\r\n \r\n # Get a list of file names\r\n fileNameList = []\r\n for file in os.listdir(folder):\r\n if \"tif\" in file and \"LED\" not in file:\r\n fileNameList.append(file)\r\n \r\n print(fileNameList)\r\n \r\n # Analyse each image\r\n for image_file_name in fileNameList:\r\n print(image_file_name)\r\n Rawimage = imread(os.path.join(folder, image_file_name))\r\n\r\n if background_substraction == True:\r\n Rawimage = np.abs(Rawimage - background_image)\r\n \r\n # Analyze each image\r\n # Run the detection on input image.\r\n MLresults = self.DetectionOnImage(Rawimage, axis = None, show_result = show_result)\r\n\r\n if save_mask == True:\r\n \r\n if not os.path.exists(os.path.join(folder, 'ML_masks')):\r\n # If the folder is not there, create the folder\r\n os.mkdir(os.path.join(folder, 'ML_masks')) \r\n \r\n fig, ax = plt.subplots()\r\n # Set class_names = [None,None,None,None] to mute class name display.\r\n visualize.display_instances(Rawimage, MLresults['rois'], MLresults['masks'], MLresults['class_ids'],\r\n class_names = [None,None,None,None], ax=ax,\r\n centre_coors = MLresults['Centre_coor'], Centre_coor_radius = 2, \r\n WhiteSpace = (0, 0))#MLresults['class_ids'],MLresults['scores'], \r\n # ax.imshow(fig)\r\n fig.tight_layout()\r\n # Save the detection Rawimage\r\n fig_name = os.path.join(folder, 'ML_masks', 'ML_mask_{}.png'.format(image_file_name[0:len(image_file_name)-4]))\r\n plt.savefig(fname = fig_name, dpi=200, pad_inches=0.0, bbox_inches='tight')\r\n \r\n if flat_cell_counted_in_folder == 0:\r\n cell_Data, flat_cell_counted_in_folder, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, image_file_name, flat_cell_counted_in_folder)\r\n else: \r\n Cell_Data_new, flat_cell_counted_in_folder, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, image_file_name, flat_cell_counted_in_folder)\r\n if len(Cell_Data_new) > 0:\r\n cell_Data = cell_Data.append(Cell_Data_new)\r\n total_cells_counted_in_folder += total_cells_counted_in_coord\r\n \r\n if save_excel == True:\r\n # Save to excel\r\n cell_Data.to_excel(os.path.join(folder, 'CellsProperties_{}flat_outof_{}cells.xlsx'.format(flat_cell_counted_in_folder, total_cells_counted_in_folder)))\r\n \r\n return cell_Data",
"def object_mask(self):\n\n # Region file directory files\n if isinstance(self._region_file_dir, list):\n reg_files = {self._keyfunct(f): f for f in chain.from_iterable(glob.glob(f'{reg_dir}/*.reg')\n for reg_dir in self._region_file_dir)}\n else:\n reg_files = {self._keyfunct(f): f for f in glob.glob(f'{self._region_file_dir}/*.reg')}\n\n # Select out the IDs of the clusters needing additional masking\n clusters_to_mask = set(reg_files).intersection(self._catalog_dictionary)\n\n for cluster_id in clusters_to_mask:\n cluster_info = self._catalog_dictionary.get(cluster_id, None)\n region_file = reg_files.get(cluster_id, None)\n\n pixel_map_path = cluster_info['cov_mask_path']\n\n # Read in the coverage mask data and header.\n good_pix_mask, header = fits.getdata(pixel_map_path, header=True, ignore_missing_end=True, memmap=False)\n\n # Read in the WCS from the coverage mask we made earlier.\n w = WCS(header)\n\n try:\n assert w.pixel_scale_matrix[0, 1] == 0.\n pix_scale = (w.pixel_scale_matrix[1, 1] * w.wcs.cunit[1]).to(u.arcsec).value\n except AssertionError:\n cd = w.pixel_scale_matrix\n _, eig_vec = np.linalg.eig(cd)\n cd_diag = np.linalg.multi_dot([np.linalg.inv(eig_vec), cd, eig_vec])\n pix_scale = (cd_diag[1, 1] * w.wcs.cunit[1]).to(u.arcsec).value\n\n # Open the regions file and get the lines containing the shapes.\n with open(region_file, 'r') as region:\n objs = [ln.strip() for ln in region\n if ln.startswith('circle') or ln.startswith('box') or ln.startswith('ellipse')]\n\n # For each shape extract the defining parameters and define a path region.\n shapes_to_mask = []\n for mask in objs:\n\n # For circle shapes we need the center coordinate and the radius.\n if mask.startswith('circle'):\n # Parameters of circle shape are as follows:\n # params[0] : region center RA in degrees\n # params[1] : region center Dec in degrees\n # params[2] : region radius in arcseconds\n params = np.array(re.findall(r'[+-]?\\d+(?:\\.\\d+)?', mask), dtype=np.float64)\n\n # Convert the center coordinates into pixel system.\n # \"0\" is to correct the pixel coordinates to the right origin for the data.\n cent_xy = w.wcs_world2pix(params[0], params[1], 0)\n\n # Generate the mask shape.\n shape = Path.circle(center=cent_xy, radius=params[2] / pix_scale)\n\n # For the box we'll need...\n elif mask.startswith('box'):\n # Parameters for box shape are as follows:\n # params[0] : region center RA in degrees\n # params[1] : region center Dec in degrees\n # params[2] : region width in arcseconds\n # params[3] : region height in arcseconds\n # params[4] : rotation of region about the center in degrees\n params = np.array(re.findall(r'[+-]?\\d+(?:\\.\\d+)?', mask), dtype=np.float64)\n\n # Convert the center coordinates into pixel system.\n cent_x, cent_y = w.wcs_world2pix(params[0], params[1], 0)\n\n # Vertices of the box are needed for the path object to work.\n verts = [[cent_x - 0.5 * (params[2] / pix_scale), cent_y + 0.5 * (params[3] / pix_scale)],\n [cent_x + 0.5 * (params[2] / pix_scale), cent_y + 0.5 * (params[3] / pix_scale)],\n [cent_x + 0.5 * (params[2] / pix_scale), cent_y - 0.5 * (params[3] / pix_scale)],\n [cent_x - 0.5 * (params[2] / pix_scale), cent_y - 0.5 * (params[3] / pix_scale)]]\n\n # For rotations of the box.\n rot = Affine2D().rotate_deg_around(cent_x, cent_y, degrees=params[4])\n\n # Generate the mask shape.\n shape = Path(verts).transformed(rot)\n\n elif mask.startswith('ellipse'):\n # Parameters for ellipse shape are as follows\n # params[0] : region center RA in degrees\n # params[1] : 
region center Dec in degrees\n # params[2] : region semi-major axis in arcseconds\n # params[3] : region semi-minor axis in arcseconds\n # params[4] : rotation of region about the center in degrees\n # Note: For consistency, the semi-major axis should always be aligned along the horizontal axis\n # before rotation\n params = np.array(re.findall(r'[+-]?\\d+(?:\\.\\d+)?', mask), dtype=np.float64)\n\n # Convert the center coordinates into pixel system\n cent_xy = w.wcs_world2pix(params[0], params[1], 0)\n\n # Generate the mask shape\n shape = Ellipse(cent_xy, width=params[2] / pix_scale, height=params[3] / pix_scale, angle=params[4])\n shape = shape.get_path()\n\n # Return error if mask shape isn't known.\n else:\n raise KeyError(\n f'Mask shape is unknown, please check the region file of cluster: {region_file} {mask}')\n\n shapes_to_mask.append(shape)\n\n # Check if the pixel values are within the shape we defined earlier.\n # If true, set the pixel value to 0.\n pts = list(product(range(w.pixel_shape[0]), range(w.pixel_shape[1])))\n\n shape_masks = np.array(\n [shape.contains_points(pts).reshape(good_pix_mask.shape) for shape in shapes_to_mask])\n\n # Combine all the shape masks into a final object mask, inverting the boolean values so we can multiply\n # our mask with our existing good pixel mask\n total_obj_mask = ~np.logical_or.reduce(shape_masks)\n\n # Apply the object mask to the existing good pixel mask\n good_pix_mask *= total_obj_mask.astype(int)\n\n # Write the new mask to disk overwriting the old mask.\n new_mask_hdu = fits.PrimaryHDU(good_pix_mask, header=header)\n new_mask_hdu.writeto(pixel_map_path, overwrite=True)",
"def _run(top_reflectivity_dir_name, top_echo_classifn_dir_name, mask_file_name,\n first_date_string, last_date_string, plot_all_heights,\n daily_times_seconds, spatial_downsampling_factor,\n expand_to_satellite_grid, top_output_dir_name):\n\n border_latitudes_deg_n, border_longitudes_deg_e = border_io.read_file()\n\n if spatial_downsampling_factor <= 1:\n spatial_downsampling_factor = None\n\n if top_echo_classifn_dir_name == '':\n top_echo_classifn_dir_name = None\n\n if mask_file_name == '':\n mask_dict = None\n else:\n print('Reading mask from: \"{0:s}\"...'.format(mask_file_name))\n mask_dict = radar_io.read_mask_file(mask_file_name)\n\n if expand_to_satellite_grid:\n mask_dict = radar_io.expand_to_satellite_grid(\n any_radar_dict=mask_dict\n )\n\n if spatial_downsampling_factor > 1:\n mask_dict = radar_io.downsample_in_space(\n any_radar_dict=mask_dict,\n downsampling_factor=spatial_downsampling_factor\n )\n\n error_checking.assert_is_geq_numpy_array(daily_times_seconds, 0)\n error_checking.assert_is_less_than_numpy_array(\n daily_times_seconds, DAYS_TO_SECONDS\n )\n\n input_file_names = radar_io.find_many_files(\n top_directory_name=top_reflectivity_dir_name,\n first_date_string=first_date_string,\n last_date_string=last_date_string,\n file_type_string=radar_io.REFL_TYPE_STRING,\n raise_error_if_any_missing=False\n )\n\n if top_echo_classifn_dir_name is None:\n echo_classifn_file_names = None\n else:\n echo_classifn_file_names = [\n radar_io.find_file(\n top_directory_name=top_echo_classifn_dir_name,\n valid_date_string=radar_io.file_name_to_date(f),\n file_type_string=radar_io.ECHO_CLASSIFN_TYPE_STRING,\n raise_error_if_missing=True\n )\n for f in input_file_names\n ]\n\n for i in range(len(input_file_names)):\n print('Reading data from: \"{0:s}\"...'.format(input_file_names[i]))\n reflectivity_dict = radar_io.read_reflectivity_file(\n netcdf_file_name=input_file_names[i], fill_nans=True\n )\n\n if top_echo_classifn_dir_name is None:\n echo_classifn_dict = None\n else:\n echo_classifn_dict = radar_io.read_echo_classifn_file(\n echo_classifn_file_names[i]\n )\n\n _plot_radar_one_day(\n reflectivity_dict=reflectivity_dict,\n echo_classifn_dict=echo_classifn_dict, mask_dict=mask_dict,\n plot_all_heights=plot_all_heights,\n daily_times_seconds=daily_times_seconds,\n border_latitudes_deg_n=border_latitudes_deg_n,\n border_longitudes_deg_e=border_longitudes_deg_e,\n spatial_downsampling_factor=spatial_downsampling_factor,\n expand_to_satellite_grid=expand_to_satellite_grid,\n top_output_dir_name=top_output_dir_name\n )\n\n if i != len(input_file_names) - 1:\n print(SEPARATOR_STRING)",
"def mask_the_images(working_path,set_name):\n\n file_list=glob('/media/talhassid/My Passport/haimTal/test_images_0b8afe447b5f1a2c405f41cf2fb1198e.npy')\n out_images = [] #final set of images for all patients\n for fname in file_list:\n out_images_per_patient = []\n print (\"working on file \", fname)\n imgs_to_process = np.load(fname.replace(\"lungmask\",\"images\")) # images of one patient\n masks = np.load(fname)\n for i in range(len(imgs_to_process)):\n mask = masks[i]\n img = imgs_to_process[i]\n new_size = [512,512] # we're scaling back up to the original size of the image\n img= mask*img # apply lung mask\n #\n # renormalizing the masked image (in the mask region)\n #\n new_mean = np.mean(img[mask>0])\n new_std = np.std(img[mask>0])\n #\n # Pulling the background color up to the lower end\n # of the pixel range for the lungs\n #\n old_min = np.min(img) # background color\n img[img==old_min] = new_mean-1.2*new_std # resetting backgound color\n img = img-new_mean\n img = img/new_std\n #make image bounding box (min row, min col, max row, max col)\n labels = measure.label(mask)\n regions = measure.regionprops(labels)\n #\n # Finding the global min and max row over all regions\n #\n min_row = 512\n max_row = 0\n min_col = 512\n max_col = 0\n for prop in regions:\n B = prop.bbox\n if min_row > B[0]:\n min_row = B[0]\n if min_col > B[1]:\n min_col = B[1]\n if max_row < B[2]:\n max_row = B[2]\n if max_col < B[3]:\n max_col = B[3]\n width = max_col-min_col\n height = max_row - min_row\n if width > height:\n max_row=min_row+width\n else:\n max_col = min_col+height\n #\n # cropping the image down to the bounding box for all regions\n # (there's probably an skimage command that can do this in one line)\n #\n img = img[min_row:max_row,min_col:max_col]\n mask = mask[min_row:max_row,min_col:max_col]\n if max_row-min_row <5 or max_col-min_col<5: # skipping all images with no god regions\n pass\n else:\n # moving range to -1 to 1 to accomodate the resize function\n mean = np.mean(img)\n img = img - mean\n min = np.min(img)\n max = np.max(img)\n img = img/(max-min)\n new_img = resize(img,[512,512], mode='constant')\n out_images_per_patient.append(new_img)\n\n id = re.sub(r'.*_images_(.*)\\.npy',r'\\1',fname)\n patient_images_and_id = (out_images_per_patient,id)\n out_images.append(patient_images_and_id)\n print (\"Delete files: {} \\n\\t {} \".format(fname,re.sub(\"lungmask\",\"images\",fname)))\n os.remove(fname)\n os.remove(fname.replace(\"images\",\"lungmask\")) # images of one patient\n\n\n np.save(working_path+\"{}Images.npy\".format(set_name),out_images)",
"def FluorescenceAnalysis(self, folder, round_num, save_mask = True):\r\n RoundNumberList, CoordinatesList, fileNameList = self.retrive_scanning_scheme(folder, file_keyword = 'Zmax')\r\n # RoundNumberList, CoordinatesList, fileNameList = self.retrive_scanning_scheme(folder, file_keyword = 'Zfocus')\r\n \r\n if not os.path.exists(os.path.join(folder, 'MLimages_{}'.format(round_num))):\r\n # If the folder is not there, create the folder\r\n os.mkdir(os.path.join(folder, 'MLimages_{}'.format(round_num))) \r\n \r\n for EachRound in RoundNumberList:\r\n \r\n cells_counted_in_round = 0\r\n \r\n if EachRound == round_num:\r\n \r\n # Start numbering cells at each round\r\n self.cell_counted_inRound = 0 \r\n \r\n for EachCoord in CoordinatesList:\r\n \r\n # =============================================================================\r\n # For tag fluorescence:\r\n # ============================================================================= \r\n print(EachCoord)\r\n #-------------- readin image---------------\r\n for Eachfilename in enumerate(fileNameList):\r\n if EachCoord in Eachfilename[1] and EachRound in Eachfilename[1]:\r\n if '0Zmax' in Eachfilename[1]:\r\n ImgNameInfor = Eachfilename[1][0:len(Eachfilename[1])-14] # get rid of '_PMT_0Zmax.tif' in the name.\r\n elif '0Zfocus' in Eachfilename[1]:\r\n ImgNameInfor = Eachfilename[1][0:len(Eachfilename[1])-16] # get rid of '_PMT_0Zfocus.tif' in the name.\r\n _imagefilename = os.path.join(folder, Eachfilename[1])\r\n #------------------------------------------\r\n \r\n # =========================================================================\r\n # USING MASKRCNN...\r\n # =========================================================================\r\n # Imagepath = self.Detector._fixPathName(_imagefilename)\r\n Rawimage = imread(_imagefilename)\r\n \r\n# if ClearImgBef == True:\r\n# # Clear out junk parts to make it esaier for ML detection.\r\n# RawimageCleared = self.preProcessMLimg(Rawimage, smallest_size=300, lowest_region_intensity=0.16)\r\n# else:\r\n# RawimageCleared = Rawimage.copy()\r\n \r\n image = ProcessImage.convert_for_MaskRCNN(Rawimage)\r\n \r\n # Run the detection on input image.\r\n results = self.Detector.detect([image])\r\n \r\n MLresults = results[0]\r\n \r\n if save_mask == True:\r\n fig, ax = plt.subplots()\r\n # Set class_names = [None,None,None,None] to mute class name display.\r\n visualize.display_instances(image, MLresults['rois'], MLresults['masks'], MLresults['class_ids'],\r\n class_names = [None,None,None,None], ax=ax,\r\n centre_coors = MLresults['Centre_coor'], Centre_coor_radius = 2, \r\n WhiteSpace = (0, 0))#MLresults['class_ids'],MLresults['scores'], \r\n # ax.imshow(fig)\r\n fig.tight_layout()\r\n # Save the detection image\r\n fig_name = os.path.join(folder, 'MLimages_{}\\{}.tif'.format(round_num, ImgNameInfor))\r\n plt.savefig(fname = fig_name, dpi=200, pad_inches=0.0, bbox_inches='tight')\r\n \r\n # segmentationImg = Image.fromarray(fig) #generate an image object\r\n # segmentationImg.save(os.path.join(folder, 'MLimages_{}\\{}.tif'.format(round_num, ImgNameInfor)))#save as tif\r\n \r\n if self.cell_counted_inRound == 0:\r\n cell_Data, self.cell_counted_inRound, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, str(ImgNameInfor), self.cell_counted_inRound)\r\n else: \r\n Cell_Data_new, self.cell_counted_inRound, total_cells_counted_in_coord = \\\r\n ProcessImage.retrieveDataFromML(Rawimage, MLresults, str(ImgNameInfor), self.cell_counted_inRound)\r\n if 
len(Cell_Data_new) > 0:\r\n cell_Data = cell_Data.append(Cell_Data_new)\r\n \r\n # Count in total how many flat and round cells are identified.\r\n cells_counted_in_round += total_cells_counted_in_coord\r\n \r\n print(\"Number of round/flat cells in this round: {}\".format(cells_counted_in_round))\r\n \r\n # Save to excel\r\n cell_Data.to_excel(os.path.join(os.path.join(folder, round_num + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_CellsProperties.xlsx')))\r\n \r\n return cell_Data",
"def test_make_mask_w_output_file(self):\n output_mask = footprint_mask(\n os.path.join(data_dir, 'sample.csv'),\n geom_col=\"PolygonWKT_Pix\",\n reference_im=os.path.join(data_dir, \"sample_geotiff.tif\"),\n out_file=os.path.join(data_dir, 'test_out.tif')\n )\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_fp_mask.tif'))\n saved_output_mask = skimage.io.imread(os.path.join(data_dir,\n 'test_out.tif'))\n\n assert np.array_equal(output_mask, truth_mask)\n assert np.array_equal(saved_output_mask, truth_mask)\n # clean up\n os.remove(os.path.join(data_dir, 'test_out.tif'))",
"def calibrate(science_list_fname, master_flat_fname, master_dark_fname, hp_map_fname, bp_map_fname, mask_bad_pixels = False,\n clean_Bad_Pix=True, replace_nans=True, background_fname = None, outdir = None):\n\n #Get the list of science frames\n #science_list = np.loadtxt(science_list_fname, dtype=str)\n science_list = science_list_fname\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the master flat\n master_flat_hdu = f.open(master_flat_fname)\n master_flat = master_flat_hdu[0].data\n print((\"Dividing each file by {}\".format(master_flat_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the bad pixel map from flat\n bp_map_hdu = f.open(bp_map_fname)\n bad_pixel_map = bp_map_hdu[0].data\n bad_pixel_map_bool = np.array(bad_pixel_map, dtype=bool)\n print((\"Using bad pixel map {}\".format(bp_map_fname)))\n\n #now if hot pixel map from dark is also given\n if hp_map_fname != None:\n hp_map_hdu = f.open(hp_map_fname)\n hot_pixel_map = hp_map_hdu[0].data\n bad_pixel_map_bool = np.logical_or(bad_pixel_map_bool, hot_pixel_map.astype(bool) )\n\n\n if background_fname != None:\n background_hdu = f.open(background_fname)\n background = background_hdu[0].data\n print(\"Subtracting background frame {} from all science files\".format(background_fname))\n\n\n for fname in science_list:\n #Open the file\n print((\"Calibrating {}\".format(fname\n )))\n hdu = f.open(fname)\n data = hdu[0].data\n science_exp_time = hdu[0].header['EXPTIME']\n\n if dark_exp_time != science_exp_time:\n warnings.warn(\"The master dark file doesn't have the same exposure time as the data. 
We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = science_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #Subtract the dark, divide by flat\n redux = ((data - factor*master_dark)/master_flat)\n #get rid of crazy values at bad pixel\n redux = redux*~bad_pixel_map_bool\n\n if background_fname != None:\n redux -= background\n\n if clean_Bad_Pix:\n # plt.plot(bad_pixel_map_bool)\n redux = cleanBadPix(redux, bad_pixel_map_bool)\n #redux = ccdproc.cosmicray_lacosmic(redux, sigclip=5)[0]\n\n # redux = ccdproc.cosmicray_median(redux, mbox=7, rbox=5, gbox=7)[0]\n\n #Mask the bad pixels if the flag is set\n if mask_bad_pixels:\n redux *= ~bad_pixel_map_bool\n\n if replace_nans:\n # nan_map = ~np.isfinite(redux)\n # redux = cleanBadPix(redux, nan_map)\n # plt.imshow(redux-after)\n nanmask = np.isnan(redux) #nan = True, just in case this is useful\n redux = np.nan_to_num(redux)\n\n #Put the cablibrated data back in the HDU list\n hdu[0].data = redux\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Subtracting {} from each flat file\".format(master_dark_fname)\n hdu[0].header['HISTORY'] = \"Dividing each file by {}\".format(master_flat_fname)\n\n if background_fname != None:\n hdu[0].header['HISTORY'] = \"Subtracted background frame {}\".format(background_fname)\n\n if mask_bad_pixels:\n hdu[0].header['HISTORY'] = \"Masking all bad pixels found in {}\".format(bp_map_fname)\n\n if clean_Bad_Pix:\n hdu[0].header['HISTORY'] = \"Cleaned all bad pixels found in {} using a median filter\".format(bp_map_fname)\n\n # #Append the bad pixel list to the HDU list\n # hdu.append(f.PrimaryHDU([bad_pixel_map]))\n # hdu[1].header['HISTORY'] = \"Appending bad pixel map :{}\".format(bp_map_fname)\n # hdu[1].header['HISTORY'] = \"0 = good pixel\"\n # hdu[1].header['HISTORY'] = \"1 = bad pixel from flat fields\"\n # hdu[1].header['HISTORY'] = \"2 = hot pixel from darks\"\n\n outname = fname.split('.')[0]+\"_calib.fits\"\n\n #if an output directory is specified we can write out to that directory instead\n #making sure to take only the stuff after the last '/' to avoid directory issues from fname\n if outdir:\n outname = outdir + fname.split('/')[-1]\n\n print((\"Writing calibrated file to {}\".format(outname)))\n #Save the calibrated file\n hdu.writeto(outname, overwrite=True)\n\n # f.PrimaryHDU(redux).writeto('redux_'+i, overwrite = True)",
"def load_images_from_folder(folder, n_cases,patch_size, mask_path, mask_type, mask_name,normalize=False, imrotate=False):\n\n# # Initialize the arrays:\n# if imrotate: # number of images is 4 * n_im\n# bigy = np.empty((n_im * 4, 64, 64))\n# bigx = np.empty((n_im * 4, 64, 64, 2))\n# else:\n# bigy = np.empty((n_im, 64, 64))\n# bigx = np.empty((n_im, 64, 64, 2))\n\n# im = 0 # image counter\n bigy = []\n filenames = os.listdir(folder)\n\n for filename in filenames[n_cases[0]:n_cases[1]]:\n if not filename.startswith('.'):\n temp = loadmat(os.path.join(folder, filename))['res']\n print temp.shape\n # Clean the STONE sense recon data\n row, col = temp.shape\n temp = np.reshape(temp, (row, col, -1))\n #valid_mask = (np.abs(np.squeeze(temp[int(row/2), int(col/2), :])) != 0)\n #final_images = temp[:,:,valid_mask]\n final_images = temp\n \n# # Resize images\n #final_images = np.abs(final_images)\n final_images_resized = np.zeros((patch_size,patch_size,final_images.shape[2]))\n for i in range(final_images.shape[2]):\n final_images_resized[:,:,i] = cv2.resize(final_images[:,:,i], (patch_size,patch_size))\n \n# # Only take a small part of the data\n# final_images = final_images[140:180,140:180,:]\n \n# # Convert to abs values\n# final_images = np.abs(final_images)\n# \n# # Normalize based on single patient case\n# final_images = (final_images - np.mean(final_images)) / np.std(final_images)\n \n# bigy_temp = cv2.imread(os.path.join(folder, filename),\n# cv2.IMREAD_GRAYSCALE)\n \n \n bigy.append(final_images_resized)\n \n bigy = np.asarray(bigy)\n cases, row, col, imgs = bigy.shape\n bigy = np.transpose(np.reshape(np.transpose(bigy, (1,2,3,0)), (row, col, -1)), (2,0,1))\n \n # convert to k-space\n imgs, row, col = bigy.shape\n bigx = np.empty((imgs, row, col, 2))\n mask = read_mask(mask_path=mask_path,mask_type=mask_type,mask_name=mask_name,patch_size=patch_size,show_image=False)\n for i in range(imgs):\n bigx[i, :, :, :] = create_x(np.squeeze(bigy[i,:,:]),mask)\n \n # convert bigx from complex to abs values\n bigy = np.abs(bigy)\n \n# im += 1\n# if imrotate:\n# for angle in [90, 180, 270]:\n# bigy_rot = im_rotate(bigy_temp, angle)\n# bigx_rot = create_x(bigy_rot, normalize)\n# bigy[im, :, :] = bigy_rot\n# bigx[im, :, :, :] = bigx_rot\n# im += 1\n\n# if imrotate:\n# if im > (n_im * 4 - 1): # how many images to load\n# break\n# else:\n# if im > (n_im - 1): # how many images to load\n# break\n\n# if normalize:\n# bigx = (bigx - np.amin(bigx)) / (np.amax(bigx) - np.amin(bigx))\n\n return bigx, bigy",
"def mask_tif(shape_path, main_dir, results_dir):\n file_name_list, path_list = eliminate_nanoverlap(main_dir, shape_path)\n\n rel_orbit_number_list = []\n for i, name in enumerate(file_name_list):\n filename = name[0:28] + \"manifest.safe\"\n path_name = path_list[i]\n rel_orbit_number = \"_\" + xml_extract(path=path_name, file=filename)\n rel_orbit_number_list.append(rel_orbit_number)\n\n shapes = import_polygons(shape_path)\n\n # Print info, what step is currently processed:\n print(\"Cliping overlapping files to ROI...\")\n\n # Create necessary folder for the output:\n VH_folder = results_dir + \"VH/\"\n VH_Asc_folder = VH_folder + \"Asc/\"\n VH_Desc_folder = VH_folder + \"Desc/\"\n if not os.path.exists(VH_folder):\n os.mkdir(VH_folder)\n os.mkdir(VH_Asc_folder)\n os.mkdir(VH_Desc_folder)\n\n VV_folder = results_dir + \"VV/\"\n VV_Asc_folder = VV_folder + \"Asc/\"\n VV_Desc_folder = VV_folder + \"Desc/\"\n if not os.path.exists(VV_folder):\n os.mkdir(VV_folder)\n os.mkdir(VV_Asc_folder)\n os.mkdir(VV_Desc_folder)\n\n # Iterate through all files, which overlap with the ROI (return from \"eliminate_nanoverlap\" function)\n for i, file in enumerate(file_name_list):\n file_name = path_list[i] + file_name_list[i]\n\n if os.path.exists(VH_Asc_folder + file[10:len(file)]):\n continue\n if os.path.exists(VH_Desc_folder + file[10:len(file)]):\n continue\n if os.path.exists(VV_Asc_folder + file[10:len(file)]):\n continue\n if os.path.exists(VV_Desc_folder + file[10:len(file)]):\n continue\n\n # Clip files to extent of ROI:\n src1 = rio.open(file_name)\n out_image, out_transform = rio.mask.mask(src1, [shapes[0]], all_touched=0, crop=True, nodata=np.nan)\n out_meta = src1.meta\n out_meta.update({\"driver\": \"GTiff\",\n \"height\": out_image.shape[1],\n \"width\": out_image.shape[2],\n \"transform\": out_transform})\n\n # Write subsets to corresponding folders and rename files to be sorted by date:\n flight_dir = file_name_list[i][file_name_list[i].index(\"___\") + 3:file_name_list[i].index(\"___\") + 4]\n polarization = file_name_list[i][file_name_list[i].index(\"grd\") - 3:file_name_list[i].index(\"grd\") - 1]\n if polarization == \"VH\":\n if flight_dir == \"A\":\n with rasterio.open(\n VH_Asc_folder + file[10:len(file)-4] + \"_\" + file[0:3] + rel_orbit_number_list[i] + \".tif\", \"w\",\n **out_meta) as dest:\n dest.write(out_image)\n if flight_dir == \"D\":\n with rasterio.open(\n VH_Desc_folder + file[10:len(file)-4] + \"_\" + file[0:3] + rel_orbit_number_list[i] + \".tif\",\n \"w\", **out_meta) as dest:\n dest.write(out_image)\n if polarization == \"VV\":\n if flight_dir == \"A\":\n with rasterio.open(\n VV_Asc_folder + file[10:len(file)-4] + \"_\" + file[0:3] + rel_orbit_number_list[i] + \".tif\", \"w\",\n **out_meta) as dest:\n dest.write(out_image)\n if flight_dir == \"D\":\n with rasterio.open(\n VV_Desc_folder + file[10:len(file)-4] + \"_\" + file[0:3] + rel_orbit_number_list[i] + \".tif\",\n \"w\", **out_meta) as dest:\n dest.write(out_image)\n return [VH_Asc_folder, VH_Desc_folder, VV_Asc_folder, VV_Desc_folder]",
"def load_mask_one_layer(self, image_id):\r\n mask_path = self.mask_path[self.ids[image_id]]\r\n file_pattern = os.path.join(mask_path, \"*.png\")\r\n info = self.image_info[image_id]\r\n mask_files = glob.glob(file_pattern)\r\n #mask_tmp = cv2.imread(mask_files[0])\r\n mask_new = np.zeros([info['width'], info['height'], mask_files.__len__()+1], dtype=np.uint8) # one more for background\r\n count = 1\r\n mask_total = 0\r\n for i in mask_files:\r\n mask = cv2.imread(i)\r\n mask = mask[:, :, 1] / 255.0\r\n #mask = ski_transform.resize(mask, (info['height'], info['width']), mode='reflect')\r\n mask_new[:, :, count] = (mask)\r\n mask_total = mask_total * (mask == 0)\r\n mask_total = mask_total + (mask>0) * count\r\n count = count + 1\r\n return mask_total",
"def mask_images(self, folder_name, mask_image_name):\n\n photo_list = self.get_photo_list(folder_name)\n masked_folder_name = folder_name + '_background'\n\n try:\n print(\"Making dir \" + str(masked_folder_name) + \" for masking\")\n os.mkdir(masked_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this masking??\")\n return\n\n full_mask_image = cv2.imread(mask_image_name, cv2.IMREAD_ANYDEPTH)\n\n for i, image_name in enumerate(photo_list):\n print(i)\n print (folder_name + image_name)\n img = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n masked_image = img\n\n size = img.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if full_mask_image[row_pixel, column_pixel] != 0:\n masked_image[row_pixel, column_pixel] = img[row_pixel, column_pixel]\n\n else:\n masked_image[row_pixel, column_pixel] = 0\n\n cv2.imwrite(masked_folder_name + '/' + image_name, masked_image.astype(np.uint16))",
"def test_make_mask_w_file_and_transform(self):\n output_mask = road_mask(\n os.path.join(data_dir, 'sample_roads_for_masking.geojson'),\n reference_im=os.path.join(data_dir, 'road_mask_input.tif'),\n width=4, meters=True, do_transform=True,\n out_file=os.path.join(data_dir, 'test_out.tif')\n )\n truth_mask = skimage.io.imread(\n os.path.join(data_dir, 'sample_road_raster_mask.tif')\n )\n saved_output_mask = skimage.io.imread(os.path.join(data_dir,\n 'test_out.tif'))\n\n assert np.array_equal(output_mask, truth_mask)\n assert np.array_equal(saved_output_mask, truth_mask)\n # clean up\n os.remove(os.path.join(data_dir, 'test_out.tif'))",
"def simulate_source_mask(binary, n_holes, hole_radius_arcmin):\n\n mask = binary.copy()\n if binary.pixel == \"HEALPIX\":\n idx = np.where(binary.data == 1)\n for i in range(n_holes):\n random_index1 = np.random.choice(idx[0])\n vec = hp.pixelfunc.pix2vec(binary.nside, random_index1)\n disc = hp.query_disc(binary.nside, vec, hole_radius_arcmin / (60.0 * 180) * np.pi)\n mask.data[disc] = 0\n\n if binary.pixel == \"CAR\":\n random_index1 = np.random.randint(0, binary.data.shape[0], size=n_holes)\n random_index2 = np.random.randint(0, binary.data.shape[1], size=n_holes)\n mask.data[random_index1, random_index2] = 0\n dist = enmap.distance_transform(mask.data)\n mask.data[dist * 60 * 180 / np.pi < hole_radius_arcmin] = 0\n\n return mask",
"def crop_acc_mask(images_dir, images_output_dir, masks_dir, mask_suffix=None, masks_output_dir=None): \n image_suffix_list = [\"C0\", \"DE\", \"T2\"]\n if not os.path.exists(images_output_dir):\n os.makedirs(images_output_dir)\n if masks_output_dir is not None and (not os.path.exists(masks_output_dir)):\n os.makedirs(masks_output_dir)\n margin = [0, 30, 30]\n masks_list = os.listdir(masks_dir)\n masks_list.sort()\n json_dict = OrderedDict()\n for mask in masks_list:\n mask_path = os.path.join(masks_dir, mask)\n if mask.endswith(\".nii.gz\"):\n print(\"#\" * 11 *11)\n print(mask_path)\n mask_sitk = sitk.ReadImage(mask_path)\n mask_npy = sitk.GetArrayFromImage(mask_sitk)\n mask_shape = mask_npy.shape\n crop_bbox_min, crop_bbox_max = get_ND_bounding_box(mask_npy, margin=margin)\n # do not crop along depth dimension\n crop_bbox_min[0] = 0\n crop_bbox_max[0] = mask_shape[0]\n print(crop_bbox_min, crop_bbox_max)\n json_dict[mask_path] = {\"crop_bbox_min\": crop_bbox_min, \"crop_bbox_max\": crop_bbox_max}\n mask_output_npy = crop_ND_volume_with_bounding_box(mask_npy, crop_bbox_min, crop_bbox_max)\n if mask_suffix is not None:\n mask = mask.replace(\"_\" + mask_suffix + \".nii.gz\", \".nii.gz\")\n if masks_output_dir is not None:\n save_cropped_array_as_nifty_volume(mask_output_npy, os.path.join(masks_output_dir, mask), mask_sitk)\n save_cropped_array_as_nifty_volume(convert_label(mask_output_npy, [1, 2, 3, 4, 5], [1, 2, 3, 1, 1]), \\\n os.path.join(images_output_dir, mask.replace(\".nii.gz\", \"_{0:04d}.nii.gz\".format(len( \\\n image_suffix_list)))), mask_sitk)\n for i, image_suffix in enumerate(image_suffix_list):\n image = mask.replace(\".nii.gz\", \"_{}.nii.gz\".format(image_suffix))\n image_path = os.path.join(images_dir, image)\n print(image_path)\n image_sitk = sitk.ReadImage(image_path)\n image_npy = sitk.GetArrayFromImage(image_sitk)\n image_output_npy = crop_ND_volume_with_bounding_box(image_npy, crop_bbox_min, crop_bbox_max)\n save_cropped_array_as_nifty_volume(image_output_npy, os.path.join(images_output_dir, mask.replace( \\\n \".nii.gz\", \"_{0:04d}.nii.gz\".format(i))), image_sitk)\n save_json(json_dict, os.path.join(images_output_dir, \"crop_information.json\"))\n if masks_output_dir is not None:\n save_json(json_dict, os.path.join(masks_output_dir, \"crop_information.json\"))",
"def calcmask(self, *args, **kwargs):\n return _image.image_calcmask(self, *args, **kwargs)",
"def run(self):\n #calculate platescale of first input image\n try:\n det = np.linalg.det(wcs.WCS(self.datain[0].header).wcs.cd)\n pscale = np.sqrt(np.abs(det))*3600.\n except:\n try:\n det = np.linalg.det(wcs.WCS(self.datain[0].header).wcs.pc)\n pscale = np.sqrt(np.abs(det))*3600.\n except:\n pscale = self.datain[0].header['PIXSCAL']\n #filtering out images which are too far away from the others\n #passing images added to a list of (image, WCS) tuples\n '''\n image_centers = []\n for f in self.datain:\n image_centers.append((f.header['CRVAL1'], f.header['CRVAL2']))\n filtered_datain = []\n dist_list = [[[0]*(len(image_centers)-1)]*len(image_centers)]\n for i in range(len(image_centers)):\n for j in range(len(image_centers)-1):\n dist_list[i][j+1] = np.sqrt((image_)**2+()**2)\n '''\n #calculations necessary for updating wcs information\n px = []\n py = []\n \n #in order to avoid NaN interactions, creating weight map\n weights=[]\n for f in self.datain:\n weights.append((np.where(np.isnan(f.image) == True, 0, 1)))\n \n for f in self.datain:\n px.extend(wcs.WCS(f.header).calc_footprint()[:,0])\n py.extend(wcs.WCS(f.header).calc_footprint()[:,1])\n x0 = (max(px)+min(px))/2.\n y0 = (max(py)+min(py))/2.\n sx = (max(px)-min(px))*np.cos(y0/180*np.pi) # arcsec\n sy = (max(py)-min(py)) # arcsec\n size = (sx*3600+self.getarg('pad')*2, sy*3600+self.getarg('pad')*2)\n xpix = size[0]//pscale\n ypix = size[1]//pscale\n cdelt = [pscale/3600.]*2\n \n #create self.dataout and give it a copy of an input's header\n self.dataout = DataFits(config = self.config)\n self.dataout.header = self.datain[0].header.copy()\n \n #update header wcs information\n self.log.info('Creating new WCS header')\n \n self.dataout.header['CRPIX1'] = xpix/2\n self.dataout.header['CRPIX2'] = ypix/2\n self.dataout.header['CRVAL1'] = x0\n self.dataout.header['CRVAL2'] = y0\n self.dataout.header['CD1_1'] = -cdelt[0]\n self.dataout.header['CD1_2'] = self.dataout.header['CD2_1'] = 0.\n self.dataout.header['CD2_2'] = cdelt[1]\n self.dataout.header['NAXIS1'] = int(xpix)\n self.dataout.header['NAXIS2'] = int(ypix)\n self.dataout.header['CTYPE1'] = 'RA---TAN-SIP'\n self.dataout.header['CTYPE2'] = 'DEC--TAN-SIP'\n self.dataout.header['RADESYS'] = 'ICRS'\n self.dataout.header['EQUINOX'] = 2000\n self.dataout.header['LATPOLE'] = self.datain[0].header['CRVAL2']\n self.dataout.header['LONPOLE'] = 180\n self.dataout.header['PIXASEC'] = pscale\n \n theta_rad = np.deg2rad(self.getarg('outangle'))\n rot_matrix = np.array([[np.cos(theta_rad), -np.sin(theta_rad)], \n [np.sin(theta_rad), np.cos(theta_rad)]])\n rot_cd = np.dot(rot_matrix, np.array([[self.dataout.header['CD1_1'], 0.],[0., self.dataout.header['CD2_2']]]))\n for i in [0,1]:\n for j in [0,1]:\n self.dataout.header['CD{0:d}_{1:d}'.format(i+1, j+1)] = rot_cd[i,j]\n \n #check drizzle arguments\n if self.getarg('kernel') == 'smoothing':\n kernel = 'lanczos3'\n elif self.getarg('kernel') in ['square', 'point', 'gaussian', 'tophat']:\n kernel = self.getarg('kernel')\n else:\n self.log.error('Kernel name not recognized, using default')\n kernel = 'square'\n if self.getarg('drizzleweights') == 'uniform':\n driz_wt = ''\n elif self.getarg('drizzleweights') in ['exptime', 'expsq']:\n driz_wt = self.getarg('drizzleweights')\n else:\n self.log.error('Drizzle weighting not recognized, using default')\n driz_wt = ''\n \n #create drizzle object and add input images\n fullwcs = wcs.WCS(self.dataout.header)\n self.log.info('Starting drizzle')\n driz = drz.Drizzle(outwcs = fullwcs, 
pixfrac=self.getarg('pixfrac'), \\\n kernel=kernel, fillval='10000', wt_scl=driz_wt)\n for i,f in enumerate(self.datain):\n self.log.info('Adding %s to drizzle stack' % f.filename)\n driz.add_image(f.imgdata[0], wcs.WCS(f.header), inwht=weights[i])\n \n try:\n fillval=float(self.getarg('fillval'))\n except:\n fillval=np.nan\n self.log.error('Fillvalue not recognized or missing, using default')\n \n #creates output fits file from drizzle output\n self.dataout.imageset(np.where(driz.outsci == 10000, fillval, driz.outsci))\n self.dataout.imageset(driz.outwht,'OutWeight', self.dataout.header)\n self.dataout.filename = self.datain[0].filename\n\n #add history\n self.dataout.setheadval('HISTORY','Coadd: %d files combined with %s kernel, pixfrac %f at %f times resolution' \\\n % (len(self.datain), kernel, self.getarg('pixfrac'), self.getarg('resolution')))",
"def gif(self, radius=150, n_instance=10):\n file_name, extension = return_folder_file_extension(self.img_name)[1:]\n\n offset_X_temp = 0 # locals, relative to img2 given, not absolute\n offset_X_tot = 0\n\n if n_instance == 1: # avoid two offsets for a single image\n n_instance = 0\n\n coords = np.array(create_circle(radius, n_instance))\n # coords = np.array(coords)\n images = []\n\n # +1 for final offset to set back image to initial offset\n for i, (x, y) in enumerate(coords):\n self.img_offset = self.img_debut.copy()\n self.img_offset = img_offset_X(self.img_offset, x)\n self.img_offset = img_offset_Y(self.img_offset, y)\n\n img2 = self.img_pixels(self.img_offset)\n # img2 = self.img_debut\n\n if self.fixed_background is True:\n img2 = img_offset_X(img2, -x) # if want a fixed background and moving black hole\n img2 = img_offset_Y(img2, -y)\n\n # adding binary ----------------------------------\n self.img_offset = img2\n self.img_offset = img_offset_X(self.img_offset, -x)\n self.img_offset = img_offset_Y(self.img_offset, -y)\n\n img3 = self.img_pixels(self.img_offset)\n\n if self.fixed_background is True:\n img3 = img_offset_X(img3, x) # if want a fixed background and moving black hole\n img3 = img_offset_Y(img3, y)\n # finished adding binary -------------------------\n size_x, size_y = img3.size\n img3 = img3.crop((450, 230, size_x-450, size_y-230))\n image_name_save = os.path.join(r'D:\\black_hole_sim\\gif_img_save', 'img_%s_x_%s_y_%s.jpg' % (i, x, y))\n img3.save(image_name_save)\n print(\"Save: \"+ image_name_save)\n images.append(img3)\n\n images[0].save(r'D:\\black_hole_sim\\gif_img_save\\animated_image.gif', format='GIF', save_all=True, append_images=images[1:], optimize=False, duration = 100)\n self.img2 = img2",
"def extract(self, files):\n for i in range(len(files)):\n print(files[i])\n img = cv2.imread('{}/{}'.format('{}/{}/{}'.format(DIR_2DST_Mask, self.patient, self.plan), files[i]), 0)\n\n \"\"\"\n Find the indices of array elements that are non-zero, i.e,\n find the pixels' positions that represents the respiratory\n functions (pixels in the respiratory function are brighter).\n \"\"\"\n color_pts = np.argwhere(img > 70)\n\n \"\"\"\n Sorts the pixels according to their x coordenate.\n Obs: np.argwhere inverts x and y, it's like (y, x), because of it,\n the parameter of itemgetter is 1 (to get x coordinate)\n \"\"\"\n lcolor_pts = sorted(color_pts.tolist(), key=itemgetter(1))\n\n \"\"\"\n If there is no pixel representing the respiratory function\n (i.e., lighter pixel) it creates an empty image (without any\n respiratory function)\n \"\"\"\n if len(lcolor_pts) == 0:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open(\n # '{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], []))\n # file.close()\n\n continue\n\n # Reverse the coordinates and store the result in lordered_pts list\n lordered_pts = []\n for j in range(len(lcolor_pts)):\n lordered_pts.append(lcolor_pts[j][::-1])\n\n \"\"\"\n Convert pixels coordinates into a tuples and check which column\n has pixels that corresponding to diaphragmatic level\n Obs. There are some columns that doesnt have any pixel that\n correpond to diaphragmatic level.\n \"\"\"\n # Columns that have a pixel corresponding diaphragmatic level\n lcolumn_available = []\n for j in range(len(lordered_pts)):\n lordered_pts[j] = tuple(lordered_pts[j])\n lcolumn_available.append(lordered_pts[j][0])\n lcolumn_available = list(set(lcolumn_available))\n # print(\"Ordered points: \", lordered_pts)\n # print(\"Columns available: \", lcolumn_available)\n\n \"\"\"\n If there is not enough columns to build a respiratory pattern,\n create a blank image\n \"\"\"\n if len(lcolumn_available) < 20:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n continue\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n first column, assign to it the value of the second y coordinate\n \"\"\"\n if lcolumn_available[0] is not 0:\n y = max(\n [x for x in lordered_pts if x[0] == lcolumn_available[0]],\n key=itemgetter(1))[1]\n lordered_pts.insert(0, (0, y))\n lcolumn_available.insert(0, 0)\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n last column, assign to it the value of the penultimate y coordinate\n available\n \"\"\"\n if lcolumn_available[-1] is not 49:\n lordered_pts.append(\n (49, lordered_pts[len(lcolumn_available)][1]))\n lcolumn_available.append(49)\n\n \"\"\"\n Get the biggest y value in each column that represents the\n diaphragmatic level\n \"\"\"\n column = 0\n lcolumn = []\n ldiaphragm_pts = []\n for j in range(50):\n # Get the column's points\n lcolumn = [x for x in lordered_pts if x[0] == column]\n # print('{}: {}'.format(j, lcolumn))\n\n if len(lcolumn) > 0:\n ldiaphragm_pts.append(\n max(lcolumn, key=itemgetter(1))) # Get the biggest y\n else:\n # Get the y value from the previous column\n lcolumn_available.insert(column, column)\n ldiaphragm_pts.append((column, ldiaphragm_pts[-1][1]))\n column += 1\n lcolumn 
= []\n\n # Draw diaphragmatic level\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n j = 0\n while(j < len(lcolumn_available) - 1):\n cv2.line(\n diaphragmatic_lvl,\n ldiaphragm_pts[j], ldiaphragm_pts[j + 1],\n (0, 0, 255), 1)\n j = j + 1\n\n lcolumn_available = []\n\n print(\"Diaphragmatic's points: \", ldiaphragm_pts)\n cv2.imshow('Diaphragmatic level', diaphragmatic_lvl)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open('{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], ldiaphragm_pts))\n # file.close()\n\n # return ldiaphragm_pts",
"def find_path(masked_image,start_pos, target_pos, size_compress_index, active_particle_size,\r\n compress = False):\r\n \r\n \r\n not_image = cv2.bitwise_not(masked_image)\r\n image_index = size_compress_index\r\n \r\n start_x,start_y = start_pos\r\n end_x, end_y = target_pos\r\n \r\n ker1=cv2.getStructuringElement(cv2.MORPH_RECT, (3,3),anchor =(-1,-1))\r\n not_image = cv2.dilate(not_image,ker1,iterations = active_particle_size//2)\r\n\r\n small_image = cv2.resize(not_image, (st_width//image_index, st_height//image_index),interpolation = cv2.INTER_AREA)\r\n ret,small_image = cv2.threshold(small_image,127,255,cv2.THRESH_BINARY)\r\n \r\n small_image = cv2.bitwise_not(small_image)\r\n # \r\n #cv2.imshow(\"thresh\", small_image)\r\n #cv2.waitKey(0)\r\n #cv2.destroyAllWindows() \r\n \r\n \r\n matrix = small_image.tolist()\r\n grid = Grid(matrix=matrix)\r\n\r\n start = grid.node(int(start_x//image_index), int(start_y//image_index))\r\n end = grid.node(int(end_x//image_index), int(end_y//image_index))\r\n\r\n finder = AStarFinder(diagonal_movement = DiagonalMovement.never)\r\n path, runs = finder.find_path(start, end, grid)\r\n \r\n new_path = list()\r\n for p in path:\r\n x,y = p\r\n x = x*image_index\r\n y = y*image_index\r\n new_path.append((x,y))\r\n \r\n compressed_path = compress_path(new_path)\r\n \r\n if compress == True:\r\n res_path = compressed_path\r\n else:\r\n res_path = new_path\r\n \r\n return res_path, runs",
"def generateMask(self, nameFile): \n imgPath = os.path.join(GG.utils.PATH_PHOTO_MASK, nameFile)\n imgMask = Image.open(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"mask.png\")))\n imgTemplate = Image.open(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"template.png\")))\n imgUpload = Image.open(imgPath)\n size = MASK_SIZE[self.avatarConfiguration[\"headSize\"]]\n imgUploadResized = imgUpload.resize(size, Image.ANTIALIAS)\n imgMask.paste(imgUploadResized, MASK_COORD[self.avatarConfiguration[\"headSize\"]], imgTemplate)\n imgMask.save(MASK_UPLOAD)\n self.avatarConfiguration[\"mask\"] = \"imgUploadMask.png\"\n self.paintMask()",
"def _buildMaskImage(self,maskname, mask_array):\n # If an old version of the maskfile was present,\n # remove it and rebuild it.\n if fileutil.findFile(maskname):\n fileutil.removeFile(maskname)\n\n _file = pyfits.open(maskname,mode='append')\n _phdu = pyfits.PrimaryHDU(data=mask_array)\n\n _file.append(_phdu)\n _file.close()\n del _file, _phdu",
"def create_centers(img_input, r, color_treshold):\n\n # make a copy of the input image\n img_np = np.copy(img_input[:,:,2])\n\n # cast radius to int\n r = np.int32(r)\n\n # define the dimensions of extended image\n ext1 = img_np.shape[0]+2*r\n ext2 = img_np.shape[1]+2*r\n\n # create the extended image \n img_ext = np.zeros((ext1, ext2))\n \n # indexing for copying all img_np pixels into img_ext\n left_index = (r,r)\n right_index = (img_ext.shape[0]-r, img_ext.shape[1]-r)\n \n # select axis0 and axis1 values of img_ext which are to be \n # replaced with img_np values.\n img_ext[left_index[0]:right_index[0], left_index[1]:right_index[1]] = img_np\n #io.imshow(img_ext)\n #io.show()\n #print(img_ext)\n\n # define the circular mask of radius r. \n mask = mk.circular_mask(r)\n\n \n # WHILE INSTANTIATION\n # This loop finds out the positions of intensity values maxcol \n # in the image. maxcol is initially set to 255, but \n # gets updated during the loop and will correspond to the maximum\n # intensity value found in the image. Then, all pixels will be \n # selected with the same intensity value. \n \n maxcol = 255\n\n # create an empty list to save the maximum intensity value corresponding \n # to the center of a nucleus. \n \n save_c_max = []\n\n while maxcol > color_treshold:\n # find maximum intensity value in img_ext.\n maxcol = np.amax(img_ext)\n\n # find position of maxcol value\n img_whitex, img_whitey = np.where(img_ext == maxcol)\n\n # select the first position with maximum intensity value\n first = (img_whitex[0], img_whitey[0])\n \n # specify indices where to apply the mask\n left_index = (first[0]-r, first[1]-r)\n right_index = (first[0]+r, first[1]+r)\n \n # create a squared subselection of the img_ext whose size is equal to mask\n submattochange = img_ext[left_index[0]:right_index[0], left_index[1]:right_index[1]]\n \n # apply the mask\n img_ext[left_index[0]:right_index[0], left_index[1]:right_index[1]] = np.multiply(submattochange,mask)\n \n # show the cells replaced by the mask\n # io.imshow(img_ext)\n # io.show()\n \n # save the values of position and intensity\n list_save = [first[0]-r, first[1]-r, maxcol]\n \n # put list_save list into save_c_max\n save_c_max.append(list_save)\n\n # cast save_c_max to int\n save_c_max = np.int32(np.array(save_c_max))\n\n i = 0\n while i < save_c_max.shape[0]:\n \n # This while iterates over all found center pixels of\n # the nuclei and replaces their color with red \n # (channel 0, intensity 255). \n \n img_input[save_c_max[i,0], save_c_max[i,1], 0] = 255\n i = i+1\n \n #r\"\"\"\n #Display image of the nuclei whose found center pixel \n #is colored red. \n #\"\"\"\n #plt.figure()\n #io.imshow(img_input)\n #io.show()\n \n return save_c_max # np.array that contains int of position and intensity of the centers",
"def paintMask(self):\n if self.avatarConfiguration[\"mask\"]:\n if not os.path.isfile(MASK_UPLOAD):\n image = self.parent.getPlayer().getImageLabel()\n filePath = GG.genteguada.GenteGuada.getInstance().getDataPath(image)\n guiobjects.generateImageSize(filePath, [244, 244], IMG_UPLOAD)\n self.generateMask(\"imgUpload.png\")\n imgPath = MASK_UPLOAD\n else:\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"mask.png\"))\n self.newAvatarImage(imgPath, \"mask\")",
"def main(logger, resultsDict):\n\n print(\"=\" * 30)\n print(\"Main function of overlayMasks.\")\n print(\"=\" * 30)\n\n # Get parameters from .json files.\n full_img_dir = config_overlay[\"full_img_dir\"]\n y_true_dir = config_overlay[\"y_true_dir\"]\n y_pred_dir = config_overlay[\"y_pred_dir\"]\n extension = config_overlay[\"extension\"]\n target_size = (config_overlay[\"target_size\"], config_overlay[\"target_size\"])\n save_maskoverlay_dir = config_overlay[\"save_maskoverlay_dir\"]\n save_fulloverlay_dir = config_overlay[\"save_fulloverlay_dir\"]\n\n # ------------\n\n # Get paths.\n full_img_paths_list = []\n y_true_paths_list = []\n y_pred_paths_list = []\n\n for full in os.listdir(full_img_dir):\n if full.endswith(extension):\n full_img_paths_list.append(os.path.join(full_img_dir, full))\n\n for full in os.listdir(y_true_dir):\n if full.endswith(extension):\n y_true_paths_list.append(os.path.join(y_true_dir, full))\n\n for full in os.listdir(y_pred_dir):\n if full.endswith(extension):\n y_pred_paths_list.append(os.path.join(y_pred_dir, full))\n\n full_img_paths_list.sort()\n y_true_paths_list.sort()\n y_pred_paths_list.sort()\n\n # ------------\n\n # Load full_img.\n full_img_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in full_img_paths_list\n ]\n\n # Load y_true masks.\n y_true_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in y_true_paths_list\n ]\n\n # Load y_pred masks.\n y_pred_arrays = [\n cv2.resize(src=cv2.imread(path, cv2.IMREAD_GRAYSCALE), dsize=target_size)\n for path in y_pred_paths_list\n ]\n\n print(full_img_arrays[0].min(), full_img_arrays[0].max())\n print(y_true_arrays[0].min(), y_true_arrays[0].max())\n print(y_pred_arrays[0].min(), y_pred_arrays[0].max())\n\n # ------------\n\n # Stack to create RGB version of grayscale images.\n full_img_rgb = [np.stack([img, img, img], axis=-1) for img in full_img_arrays]\n\n # Green true mask. Note OpenCV uses BGR.\n y_true_rgb = [\n np.stack([np.zeros_like(img), img, np.zeros_like(img)], axis=-1)\n for img in y_true_arrays\n ]\n\n # Red predicted mask. Note OpenCV uses BGR.\n y_pred_rgb = [\n np.stack([np.zeros_like(img), np.zeros_like(img), img], axis=-1)\n for img in y_pred_arrays\n ]\n\n # ------------\n\n for i in range(len(full_img_rgb)):\n\n # First overlay true and predicted masks.\n overlay_masks = cv2.addWeighted(\n src1=y_true_rgb[i], alpha=0.5, src2=y_pred_rgb[i], beta=1, gamma=0\n )\n\n # Then overlay full_img and masks.\n overlay_all = cv2.addWeighted(\n src1=full_img_rgb[i], alpha=1, src2=overlay_masks, beta=0.5, gamma=0\n )\n\n # Save.\n\n # Get patient ID from y_true masks.\n filename = os.path.basename(y_true_paths_list[i])\n filename_split = filename.split(\"_\")\n patientID = \"_\".join([filename_split[i] for i in range(4)])\n\n masks_filename = patientID + \"___MasksOverlay.png\"\n all_filename = patientID + \"___AllOverlay.png\"\n\n save_path_masks = os.path.join(save_maskoverlay_dir, masks_filename)\n save_path_all = os.path.join(save_fulloverlay_dir, all_filename)\n\n print(save_path_masks)\n print(save_path_all)\n\n cv2.imwrite(filename=save_path_masks, img=overlay_masks)\n cv2.imwrite(filename=save_path_all, img=overlay_all)",
"def run(self,workspace):\n image_name = self.image_name.value\n cpimage = workspace.image_set.get_image(image_name)\n image = cpimage.pixel_data\n mask = cpimage.mask\n workspace.display_data.statistics = []\n level = int(self.atrous_level.value)\n\n wavelet = self.a_trous(1.0*image, level+1)\n wlevprod = wavelet[:,:,level-1] * 3.0\n\n spotthresh = wlevprod.mean() + float(self.noise_removal_factor.value) * wlevprod.std()\n tidx = wlevprod < spotthresh\n wlevprod[tidx] = 0\n\n wlevprod = self.circular_average_filter(wlevprod, int(self.smoothing_filter_size.value))\n wlevprod = self.smooth_image(wlevprod, mask)\n\n max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod,3)\n maxloc = (wlevprod == max_wlevprod)\n twlevprod = max_wlevprod > float(self.final_spot_threshold.value)\n maxloc[twlevprod == 0] = 0\n \n labeled_image,object_count = scipy.ndimage.label(maxloc,\n np.ones((3,3),bool))\n\n unedited_labels = labeled_image.copy()\n # Filter out objects touching the border or mask\n border_excluded_labeled_image = labeled_image.copy()\n labeled_image = self.filter_on_border(image, labeled_image)\n border_excluded_labeled_image[labeled_image > 0] = 0\n \n # Relabel the image\n labeled_image,object_count = relabel(labeled_image)\n new_labeled_image, new_object_count = self.limit_object_count(\n labeled_image, object_count)\n if new_object_count < object_count:\n # Add the labels that were filtered out into the border\n # image.\n border_excluded_mask = ((border_excluded_labeled_image > 0) |\n ((labeled_image > 0) & \n (new_labeled_image == 0)))\n border_excluded_labeled_image = scipy.ndimage.label(border_excluded_mask,\n np.ones((3,3),bool))[0]\n object_count = new_object_count\n labeled_image = new_labeled_image\n \n # Make an outline image\n outline_image = cellprofiler.cpmath.outline.outline(labeled_image)\n outline_border_excluded_image = cellprofiler.cpmath.outline.outline(border_excluded_labeled_image)\n \n if self.show_window:\n statistics = workspace.display_data.statistics\n statistics.append([\"# of accepted objects\",\n \"%d\"%(object_count)])\n\n workspace.display_data.image = image\n workspace.display_data.labeled_image = labeled_image\n workspace.display_data.border_excluded_labels = border_excluded_labeled_image\n\n # Add image measurements\n objname = self.object_name.value\n measurements = workspace.measurements\n cpmi.add_object_count_measurements(measurements,\n objname, object_count)\n # Add label matrices to the object set\n objects = cellprofiler.objects.Objects()\n objects.segmented = labeled_image\n objects.unedited_segmented = unedited_labels\n objects.parent_image = image\n \n workspace.object_set.add_objects(objects,self.object_name.value)\n cpmi.add_object_location_measurements(workspace.measurements, \n self.object_name.value,\n labeled_image)\n if self.should_save_outlines.value:\n out_img = cpi.Image(outline_image.astype(bool),\n parent_image = image)\n workspace.image_set.add(self.save_outlines.value, out_img)",
"def create_masks(image_folder: str, annotation_path: str, outpath: str):\n\n train_reader = ReaderAnnotation(annotation_path)\n\n all_images = os.listdir(image_folder)\n annotated_images = train_reader.annotation.keys()\n\n creator = MaskCreator()\n\n for key in annotated_images:\n file_extension = \".JPG\"\n if not os.path.isfile(\n os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n ):\n file_extension = file_extension.lower()\n\n image_name = os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n print(image_name)\n\n out_image_path = os.path.join(outpath, os.path.split(image_name)[-1])\n assert os.path.exists(out_image_path), \"Out image path doesn't exist\"\n\n image = plt.imread(image_name)\n h, w, c = image.shape\n\n regions = train_reader.get(key)[\"regions\"]\n # less than minimal distance\n radius = int(train_reader.get_radius_min(regions=regions) * 0.9)\n\n masks = []\n for _, center in regions.items():\n masks.append(\n creator.create_circular_mask(\n h=h,\n w=w,\n center=(\n int(center[\"shape_attributes\"][\"cx\"]),\n int(center[\"shape_attributes\"][\"cy\"]),\n ),\n radius=radius,\n )\n )\n\n if len(masks) > 50:\n masks = [creator._unite_masks(masks)]\n\n if masks:\n creator.visualize(\n image=image,\n masks=masks,\n filename=out_image_path,\n use_image=False,\n )\n else:\n creator._create_empty_mask(image=image, filename=out_image_path)\n\n print(\"Empty images:\")\n for empty_image in list(set(all_images) - set(annotated_images)):\n if os.path.exists(out_image_path):\n continue\n empty_image = os.path.join(image_folder, empty_image)\n print(empty_image)\n image = plt.imread(empty_image)\n creator._create_empty_mask(\n image=image,\n filename=os.path.join(\n outpath,\n os.path.split(empty_image)[-1],\n ),\n )",
"def main():\n\n #Parse input arguments\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n\n parser.add_argument(\"-i\", \"--image\", dest=\"image\",\n help=\"specify the name of the image\", metavar=\"IMAGE\")\n\n args = parser.parse_args()\n\n #Load image\n if args.image is None:\n print(\"Please specify the name of image\")\n print(\"use the -h option to see usage information\")\n sys.exit(2)\n else:\n image_name = args.image.split(\".\")[0]\n input_image = cv2.imread(args.image, 0)\n\n\n bin_img = bi.binary_image()\n hist = bin_img.compute_histogram(input_image)\n\n outputDir = 'output/cellct/'\n outputDir_compress = 'output/Compression/'\n\n #Saving histogram to output directory \n hist_fig = plt.plot(hist)\n plt.savefig(outputDir+\"hist.png\")\n\n threshold = bin_img.find_optimal_threshold(hist)\n print(\"Optimal threshold: \", threshold)\n\n binary_img = bin_img.binarize(input_image)\n output_image_name = outputDir + \"binary_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, binary_img)\n\n #blobcoloring\n cell_count_obj = cc.cell_counting()\n\n regions = cell_count_obj.blob_coloring(binary_img)\n stats = cell_count_obj.compute_statistics(regions)\n\n cell_stats_img = cell_count_obj.mark_regions_image(binary_img, stats)\n output_image_name = outputDir + \"cell_stats_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, cell_stats_img)\n\t\n #Compression\n rle_obj = rle.rle()\n rle_code = rle_obj.encode_image(binary_img)\n print(\"-------------- Runlength Code -------------------\")\n print(rle_code)\n\n [height, width] = binary_img.shape\n\n decoded_image = rle_obj.decode_image(rle_code, height, width)\n\n output_image_name = outputDir_compress + \"decoded_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, decoded_image)"
]
| [
"0.6629627",
"0.62719816",
"0.6038128",
"0.60295534",
"0.5812431",
"0.57740736",
"0.57329434",
"0.5661095",
"0.56264246",
"0.5604142",
"0.5582197",
"0.5575477",
"0.55748725",
"0.5556835",
"0.55290353",
"0.5528477",
"0.55214965",
"0.5478416",
"0.5466864",
"0.5445852",
"0.5437116",
"0.5435135",
"0.5428305",
"0.54104334",
"0.5404223",
"0.5397674",
"0.53717035",
"0.5356743",
"0.5354936",
"0.53455794"
]
| 0.7799771 | 0 |
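The drizzle/coadd negative at the top of this block estimates the detector plate scale from the WCS CD matrix as sqrt(|det(CD)|) * 3600 arcsec/pixel before falling back to the PC matrix or a PIXSCAL keyword. A minimal standalone version of that core computation is sketched below; the CD matrix value is made up for illustration and the fallback branches from the original are omitted.

# Standalone sketch of the plate-scale calculation used in the drizzle/coadd
# negative above: pscale = sqrt(|det(CD)|) * 3600 arcsec per pixel.
# The CD matrix below is an illustrative value, not taken from the dataset.
import numpy as np

def plate_scale_arcsec(cd_matrix):
    """Return the plate scale in arcsec/pixel from a 2x2 WCS CD matrix (deg/pixel)."""
    return np.sqrt(np.abs(np.linalg.det(cd_matrix))) * 3600.0

cd = np.array([[-1.0e-4, 0.0],
               [0.0, 1.0e-4]])   # ~0.36 arcsec/pixel, purely illustrative
print(plate_scale_arcsec(cd))    # -> 0.36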
Run zscan on a single healpix pixel. All files will be placed in self.config.outpath (see self.__init__) | def run(self):
if not self.config.galfile_pixelized:
raise ValueError("Code only runs with pixelized galfile.")
self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)
# Compute the border size
self.config.border = self.config.compute_border()
self.config.d.hpix = [self.pixel]
self.config.d.nside = self.nside
self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)
# Do the run
self.config.start_file_logging()
self.config.logger.info("Running zscan on pixel %d" % (self.pixel))
runzscan = RunZScan(self.config)
if not os.path.isfile(runzscan.filename):
runzscan.run()
runzscan.output(savemembers=True, withversion=True)
self.config.stop_file_logging() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True,\n check_parfile=True, check_randfile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n self.config.start_file_logging()\n self.config.logger.info(\"Running zmask on pixel %d\" % (self.pixel))\n\n rand_zmask = RunRandomsZmask(self.config)\n\n if not os.path.isfile(rand_zmask.filename):\n rand_zmask.run()\n rand_zmask.output(savemembers=False, withversion=False)\n\n # All done\n self.config.stop_file_logging()",
"def run(self):\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=False, check_bkgfile=True, check_bkgfile_components=False, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n\n self.config.logger.info(\"Running runcat on pixel %d\" % (self.pixel))\n\n runcat = RunCatalog(self.config)\n if not os.path.isfile(runcat.filename):\n runcat.run(do_percolation_masking=self.config.runcat_percolation_masking)\n runcat.output(savemembers=True, withversion=True)\n\n self.config.stop_file_logging()",
"def run(self):\n\n # need to think about outpath\n\n # Make sure all files are here and okay...\n\n if not self.config.galfile_pixelized:\n raise ValueError(\"Code only runs with pixelized galfile.\")\n\n self.config.check_files(check_zredfile=True, check_bkgfile=True, check_bkgfile_components=True, check_parfile=True, check_zlambdafile=True)\n\n # Compute the border size\n\n self.config.border = self.config.compute_border()\n\n self.config.d.hpix = [self.pixel]\n self.config.d.nside = self.nside\n self.config.d.outbase = '%s_%d_%05d' % (self.config.outbase, self.nside, self.pixel)\n\n # Do the run\n self.config.start_file_logging()\n self.config.logger.info(\"Running redMaPPer on pixel %d\" % (self.pixel))\n\n firstpass = RunFirstPass(self.config)\n\n if not os.path.isfile(firstpass.filename):\n firstpass.run()\n firstpass.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Firstpass file %s already present. Skipping...\" % (firstpass.filename))\n\n self.config.catfile = firstpass.filename\n\n # Clear out the firstpass memory\n del firstpass\n\n like = RunLikelihoods(self.config)\n\n if not os.path.isfile(like.filename):\n like.run()\n like.output(savemembers=False, withversion=False)\n else:\n self.config.logger.info(\"Likelihood file %s already present. Skipping...\" % (like.filename))\n\n self.config.catfile = like.filename\n\n # Clear out the likelihood memory\n del like\n\n perc = RunPercolation(self.config)\n\n if not os.path.isfile(perc.filename):\n perc.run()\n perc.output(savemembers=True, withversion=False)\n else:\n self.config.logger.info(\"Percolation file %s already present. Skipping...\" % (perc.filename))\n\n self.config.stop_file_logging()",
"def test_scan_file(self):\n self.run_scan(self.filename, 1)",
"def main():\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)",
"def skywalker(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tbin = kargs.get('bin', this._BINARY)\n\t\toffshore = kargs.get('offshore', 5)\n\t\tminSize = kargs.get('minSize', 3)\n\t\tblur = kargs.get('blur', False)\n\t\t\n\t\tif blur: # Flou de test\n\t\t\tkernel = np.ones((3, 3), np.float32)/9\n\t\t\tbin = cv2.filter2D(bin, -1, kernel)\n\t\t\n\t\t# On duplique l'image pour le rendu final\n\t\tscan = EmptyFrom(bin, 3)\n\t\tscan[:,:,0] = scan[:,:,1] = scan[:,:,2] = bin\n\t\tthis._SCAN = scan\n\t\t\n\t\tstep = 0 # Compteur de pas dans le vide\n\t\tstart, end = None, None\n\t\t\n\t\t# Dimensions de l'image à scanner\n\t\tsize = D2Point(width(bin), height(bin))\n\t\tratio = size if minSize < 1 else 1\n\t\t\n\t\t# Scan pixel par pixel, en partant du bas\n\t\tfor v in xrange(int(size.y)-1, -1, -1):\n\t\t\tfor u in xrange(int(size.x)):\n\t\t\t\n\t\t\t\tif bin.item((v, u)): # Si un pixel != 0:\n\t\t\t\t\tscan[v,u] = [0, 0, 255] # Rouge.\n\t\t\t\t\tstep = 0 # On reset le jump\n\t\t\t\t\t\n\t\t\t\t\t# Si c'est le premier\n\t\t\t\t\tif not start:\n\t\t\t\t\t\tstart = D2Point(u, v)\n\t\t\t\t\t\tend = D2Point(u, v)\n\t\t\t\t\telse: # On trace\n\t\t\t\t\t\tend.x, end.y = u, v\n\t\t\t\t\n\t\t\t\telif end:\n\t\t\t\t\tif step < offshore:\n\t\t\t\t\t\tscan[v,u] = [0, 255, 255] # Jaune\n\t\t\t\t\t\tstep += 1 # On continue\n\t\t\t\t\telif abs((start - end)/ratio) < minSize:\n\t\t\t\t\t\tstart, end = None, None\n\t\t\t\t\telse: break\n\t\t\t\t# elif end: break\n\t\t\t###\n\t\t\tif end: break\n\t\t###\n\t\t\n\t\tif end: # Si on a trouvé une fin\n\t\t\t\n\t\t\t# Point médian = doigt\n\t\t\tresult = start % end\n\t\t\t\n\t\t\t# Visuel\n\t\t\tscan[:,result.x,:] = [0, 255, 0] # On trace une bande verte\n\t\t\tscan[result.y,:,:] = [0, 127, 0] # On trace une autre bande verte\n\t\t\t\n\t\t\t# Reformatage\n\t\t\tresult /= size-1 # On remet en ratio d'image\n\t\t\tresult.x = 1 - result.x # On inverse le côté de mesure\n\t\t\t\n\t\t\t# Stockage\n\t\t\tthis._DETECTED = result # On stocke le point détecté\n\t\t\tthis._BOTTOM = result.y == 1 # On clic ou bien ?\n\t\t\n\t\t# Si rien\n\t\telse:\n\t\t\tresult = None\n\t\t\tthis._BOTTOM = False\n\t\t\n\t\t# Tchao\n\t\treturn result",
"def run_scan(self, path, expt, ext=\"\"):\n f = CallCounter()\n symbaudio.utils.filesystem.apply_to_files(path, ext, f, enable_logs=False)\n self.assertEqual(f.count, expt)",
"def _scan(self):\n\n exposure_time = float(self._header_dictionary[\"TIME\"])\n epoch = 0\n osc_start = float(self._header_dictionary[\"OSC_START\"])\n osc_range = float(self._header_dictionary[\"OSC_RANGE\"])\n\n return self._scan_factory.single_file(\n self._image_file, exposure_time, osc_start, osc_range, epoch\n )",
"def main():\n stats = []\n start = timer()\n\n for file_name in get_dataset():\n\n # load image and ground truth detection mask\n img = cv2.imread(settings.PATH + file_name)\n ground_truth_mask = cv2.imread(settings.PATH_GT_MASKS + file_name)\n\n # Find list of barcode regions (rotated rectangle) within image\n barcode_regions, debug_img = find_barcodes(img)\n barcode_regions_mask = np.zeros(img.shape, np.uint8)\n barcode_images = None\n result = []\n\n # Decode barcode regions\n for barcode_region in barcode_regions:\n\n # Decode barcode image\n barcode_img = barcode_region.extract_from(img)\n barcode_mask = barcode_region.get_mask(img)\n debug_img = barcode_region.draw(debug_img)\n\n # Combine masks from multiple detected regions\n barcode_regions_mask += barcode_mask\n\n # Decode barcode\n decoded = pyzbar.decode(barcode_img)\n\n # Keep result for logging\n data = \", \".join([d.data.decode(\"utf-8\") for d in decoded])\n result.append({\"data\": data, \"region\": barcode_region.json()})\n\n if settings.SHOW_IMAGE:\n barcode_images = img_concat(barcode_images, barcode_img)\n\n # Jaccard_accuracy = intersection over union of the two binary masks\n jaccard_accuracy = 0\n if ground_truth_mask is not None:\n r = barcode_regions_mask.max(axis=-1).astype(bool)\n u = ground_truth_mask.max(axis=-1).astype(bool)\n jaccard_accuracy = float((r & u).sum()) / (r | u).sum()\n stats.append(jaccard_accuracy)\n\n # Log result\n logger.info(\n \"Image processed\",\n file_name=file_name,\n jaccard_accuracy=jaccard_accuracy,\n success=jaccard_accuracy > 0.5,\n result=result,\n )\n\n # In debug mode show visualization of detection algorithm\n if settings.SHOW_IMAGE:\n\n # Add alpha channel\n debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2BGRA)\n if barcode_images is not None:\n barcode_images = cv2.cvtColor(barcode_images, cv2.COLOR_BGR2BGRA)\n\n # Overlay error mask\n # Pixel-wise difference between ground truth and detected barcodes\n if ground_truth_mask is not None:\n error_img = np.zeros(debug_img.shape, np.uint8)\n error_img[r & u] = np.array([0, 0, 0, 0], dtype=np.uint8)\n error_img[np.logical_xor(r, u)] = np.array(\n [0, 0, 255, 1], dtype=np.uint8\n )\n debug_img = cv2.addWeighted(debug_img, 1, error_img, 0.5, 0)\n\n # Append barcode pictures to the right\n debug_img = img_concat(debug_img, barcode_images, axis=1)\n\n # Show visualization\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"img\", debug_img)\n cv2.waitKey(0)\n\n # Calculate final stats\n end = timer()\n accuracy = np.array(stats).mean()\n successes = np.where(np.array(stats) > 0.5)[0]\n logger.info(\n \"Final stats\",\n accuracy=accuracy,\n detection_rate=float(len(successes)) / len(stats),\n fps=len(stats) / (end - start),\n )",
"def main():\n folder = \"D:\\\\Noam10\\\\Documents\\\\Documents\\\\dither 2\"\n filename = \"kirigiri\"\n filetype = \".jpg\"\n input_file = folder + \"\\\\\" + filename + filetype\n for palette in paletteDict.keys():\n output_file = folder + \"\\\\\" + filename + \"(\" + palette + \").bmp\"\n Dither(input_file, output=output_file, palette=paletteDict[palette])\n print(output_file)",
"def main():\n try:\n pixid = sys.argv[1]\n except IndexError:\n print('Usage: python pixget.py [pixid] (save_path)')\n exit(1)\n\n # get the path\n if len(sys.argv) > 2:\n path = sys.argv[2]\n else:\n path = '.'\n\n imgInfoPool = []\n if get_image_url(pixid, imgInfoPool):\n exit(1)\n download_image(path, imgInfoPool)",
"def run_main_test():\r\n\r\n print(\"\"\"\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n +++ Performing Main LZJD Full File Test +++\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n \"\"\")\r\n # iterate over the files in the directory\r\n for f in listdir(SRC):\r\n if isfile(join(SRC, f)):\r\n # prepare a dictionary with the digests ready to compare\r\n DIGESTS[f] = {'src': None, 'r2': None, 'ghidra': None}\r\n\r\n # calculate digest of src file\r\n DIGESTS[f]['src'] = digest(join(SRC, f))\r\n\r\n # name adjustment\r\n f2 = f.replace(\".c\", \".o\")\r\n\r\n # calculate digest of ghidra and r2 outputs\r\n DIGESTS[f]['ghidra'] = digest(join(GHIDRA_PATH, GHIDRA_NAME.format(f2)))\r\n DIGESTS[f]['r2'] = digest(join(R2DEC_PATH, R2DEC_NAME.format(f2)))\r\n\r\n # obtain the similarity from source\r\n SCORES[f] = {'ghidra': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['ghidra']),\r\n 'r2': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['r2']),\r\n 'x': get_lzjd_sim(DIGESTS[f]['ghidra'], DIGESTS[f]['r2'])}\r\n\r\n gidra_doms = 0\r\n for f in SCORES:\r\n print(\"{0:12}: Scores G:{1:20} R2:{2:20} X:{3:20} D:{4:20}\".format(f,\r\n SCORES[f]['ghidra'],\r\n SCORES[f]['r2'],\r\n SCORES[f]['x'],\r\n SCORES[f]['ghidra'] - SCORES[f]['r2']))\r\n if SCORES[f]['ghidra'] > SCORES[f]['r2']:\r\n gidra_doms += 1\r\n print(\"Ghidra Dominated on {} files\".format(gidra_doms))\r\n # This section of code prepares visualizations on the data for easy analysis\r\n plot_scatter(SCORES, title=\"LZJD Full File scores\")\r\n\r\n # obtian the scores as input data to the plots\r\n bxplt_data_gd = [score['ghidra'] for score in SCORES.values()]\r\n bxplt_data_r2 = [score['r2'] for score in SCORES.values()]\r\n\r\n # run pairwise t test\r\n print(\"Performing T-Test on LZJD Distance of files\")\r\n run_ttest(bxplt_data_gd, bxplt_data_r2)",
"def extract_scans(self):\n config_summary_fpath = os.path.join(\n self.scandir,\n 'config_summary.json'\n )\n cfg = from_file(config_summary_fpath)\n \n self.data_is_data = cfg['data_is_data']\n if self.data_is_data:\n raise ValueError('Analysis should NOT have been performed '\n 'on data since this script should only '\n 'process output from MC studies.')\n\n # Get naming scheme\n labels = Labels(\n h0_name=cfg['h0_name'],\n h1_name=cfg['h1_name'],\n data_name=cfg['data_name'],\n data_is_data=self.data_is_data,\n fluctuate_data=self.fluctuate_data,\n fluctuate_fid=self.fluctuate_fid\n )\n bits = labels.dict['data_name'].split('_')\n bare_truth = bits[0]\n injparam = None\n for bit in bits:\n if not (bit == bare_truth):\n if injparam is None:\n injparam = bit\n else:\n injparam += '_%s'%bit\n self.labels[injparam] = labels\n # Get starting params\n self.get_starting_params(cfg=cfg, injparam=injparam)\n self.get_data(injparam=injparam)",
"def z_scan(self, z_position_list):\n self.generic_scan(self.z, z_position_list)",
"def main(folder, quiet=0):\n\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n\n\n color1 = \"I4\" #filter system for first color of CMD\n color2 = \"M1\" #filter system for second color of CMD\n zeromagc1 = zero.zero_mag[color1]\n zeromagc2 = zero.zero_mag[color2]\n min_mag = 8. #minimal observation limit\n max_mag = 0. #maximal observation limit\n\n#getting file list\n files = sorted(os.listdir('%s/%s' % (os.getcwdu(), folder))) \n out = []\n\n for fil in files:\n#only using files created by the automated simulation\n if fil.startswith('sim_') and not 'settings' in fil.encode(\"ascii\"):\n print(\"%s/%s\" % (folder,fil.encode(\"ascii\")), file=output_stream)\n \n\n # Read in\n hdulist = fits.open('%s/%s' %(folder,fil))\n data = hdulist[1].data\n\n #calculating magnitudes from fluxes and converting to CMD-data\n x = -2.5*(np.log10(data['c%s' % color1]/zeromagc1) - np.log10(data['c%s' % color2]/zeromagc2))\n y = -2.5*(np.log10(data['c%s' % color2]/zeromagc2))\n\n \n sel = np.logical_and( (y > -10./3. * (x-1.) + 10.), np.logical_and(max_mag < y, y < min_mag))\n sel = np.logical_and(sel, y < -x + 12.)\n n = sum(sel)\n t = Table(hdulist[1].data)\n if 'sel' in t.columns:\n t.remove_column('sel')\n t.add_column(Column(name='sel', data=sel.astype('int')))\n \n hdulist[1].data = np.array(t)\n tmp, av, apera, age = fil.split('_')\n fits.update('%s/%s' %(folder,fil), np.array(t), ext = 1, clobber=True)\n out.append([av, apera, age, n])\n\n #writing obtained data to \"folder/__expected_number\"\n head = ['#', 'AV', 'Aperature_size', 'Age', 'Expected_number']\n f = open('%s/__expected_number' % folder, 'w')\n f.write(','.join(head)+'\\n' )\n np.savetxt(f, np.asarray(out).astype(int))\n f.close()\n \n print (\"Analysed %s files and saved output to %s\" % (len(out),'%s/__expected_number' % folder), file=output_stream)",
"def _scan(self):\n\n return self._scan_factory.make_scan(\n image_range=(1, 1),\n # dummy value--return to this later please\n exposure_time=1,\n oscillation=(\n self.detectorbase.osc_start,\n self.detectorbase.osc_start + self.detectorbase.deltaphi,\n ),\n epochs={1: 0.0}, # Later get the true time values from HDF5 file\n )",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--scans_dir', metavar='<scans_dir>', dest='SCANS_DIR', action='store', required=True,\n help='Full path to directory where scan tiff stacks are. This directory should ONLY contain '\n 'scan tiff stacks.')\n parser.add_argument('--masks_dir', metavar='<scans_dir>', dest='MASKS_DIR', action='store', required=False,\n help='Full path to directory where stroke masks are. Stroke masks should be 8-bit grayscale '\n 'tiff stacks with the .tif extension. There should be one stroke mask for each scan in t'\n 'he <scans_dir> directory and this pairing should have identical ZYX dimensions. The str'\n 'oke mask tiffs should be named following this example: If <scans_dir> has a file called'\n 'scan1.tif, the corresponding stroke mask should be named scan1_stroke_mask.tif')\n parser.add_argument('--W', metavar='<INIT_W', dest='INIT_W', action='store', required=True,\n help='An integer value representing the width of the cropping box.')\n\n parser.add_argument('--H', metavar='<INIT_H', dest='INIT_H', action='store', required=True,\n help='An integer value representing the height of the cropping box.')\n\n args = vars(parser.parse_args())\n crop_all_stacks(args)\n return 0",
"def analyzeImage(self, imageName, outputdb= None,\n outputimageRootDir=None, mintexp=60):\n\n # TODO: Make this procedure thread safe so it can be accelerated a bit.\n\n retCatalog = self.generateCrossmatchedCatalog(imageName, mintexp=mintexp)\n\n if (retCatalog is None) or (retCatalog['instmag'] is None) or (len(retCatalog['ra']) < 10):\n if retCatalog is None:\n return\n\n\n if len(retCatalog['ra']) < 10:\n _logger.info (\"%s: Catalog returned, but is has less than 10 stars. Ignoring. \" % (imageName))\n return\n\n # calculate the per star zeropoint\n magZP = retCatalog['refmag'] - retCatalog['instmag']\n refmag = retCatalog['refmag']\n refcol = retCatalog['refcol']\n\n # Calculate the photometric zeropoint.\n # TODO: Robust median w/ rejection, error propagation.\n\n cleandata = self.reject_outliers(magZP, 3)\n photzp = np.median(cleandata)\n photzpsig = np.std(cleandata)\n\n # calculate color term\n\n try:\n cond = (refcol > 0) & (refcol < 3) & (np.abs(magZP - photzp) < 0.75)\n colorparams = np.polyfit(refcol[cond], (magZP - photzp)[cond], 1)\n color_p = np.poly1d(colorparams)\n delta = np.abs(magZP - photzp - color_p(refcol))\n cond = (delta < 0.2)\n colorparams = np.polyfit(refcol[cond], (magZP - photzp)[cond], 1)\n color_p = np.poly1d(colorparams)\n colorterm = colorparams[0]\n\n except:\n _logger.warning(\"could not fit a color term. \")\n color_p = None\n colorterm = 0\n\n # if requested, generate all sorts of diagnostic plots\n if (outputimageRootDir is not None) and (os.path.exists(outputimageRootDir)):\n outbasename = os.path.basename(imageName)\n outbasename = re.sub('.fits.fz', '', outbasename)\n\n ### Zeropoint plot\n plt.figure()\n plt.plot(refmag, magZP, '.')\n plt.xlim([10, 22])\n plt.ylim([photzp-0.5, photzp+0.5])\n plt.axhline(y=photzp, color='r', linestyle='-')\n plt.xlabel(\"Reference catalog mag\")\n plt.ylabel(\"Reference Mag - Instrumnetal Mag (%s)\" % (retCatalog['instfilter']))\n plt.title(\"Photometric zeropoint %s %5.2f\" % (outbasename, photzp))\n plt.savefig(\"%s/%s_%s_zp.png\" % (outputimageRootDir, outbasename, retCatalog['instfilter']))\n plt.close()\n\n ### Color term plot\n plt.figure()\n plt.plot(refcol, magZP - photzp, '.')\n if color_p is not None:\n xp = np.linspace(-0.5, 3.5, 10)\n plt.plot(xp, color_p(xp), '-', label=\"color term fit % 6.4f\" % (colorterm))\n plt.legend()\n\n\n plt.xlim ([-0.5, 3.0])\n plt.ylim ([-1,1])\n plt.xlabel(\"(g-r)$_{\\\\rm{SDSS}}$ Reference\")\n plt.ylabel(\"Reference Mag - Instrumnetal Mag - ZP (%5.2f) %s\" % (photzp, retCatalog['instfilter']))\n plt.title(\"Color correction %s \" % (outbasename))\n plt.savefig(\"%s/%s_%s_color.png\" % (outputimageRootDir, outbasename, retCatalog['instfilter']))\n plt.close()\n\n\n # TODO: Make this thread safe, e.g., write to transactional database, or return values for storing externally.\n\n if outputdb is not None:\n outputdb.addphotzp ( (imageName, retCatalog['dateobs'].replace('T', ' '), retCatalog['siteid'], retCatalog['domid'],\n retCatalog['telescope'], retCatalog['instrument'], retCatalog['instfilter'],\n retCatalog['airmass'], photzp, colorterm, photzpsig))\n else:\n _logger.info (\"Not safing output for image %s \" % imageName)\n # with open(pickle, 'a') as f:\n # output = \"%s %s %s %s %s %s %s %s % 6.3f % 6.3f % 6.3f\\n\" % (\n # imageName, retCatalog['dateobs'], retCatalog['siteid'], retCatalog['domid'],\n # retCatalog['telescope'], retCatalog['instrument'], retCatalog['instfilter'],\n # retCatalog['airmass'], photzp, colorterm, photzpsig)\n # _logger.info(output)\n # 
f.write(output)\n # f.close()\n\n return photzp",
"def skyPixelsHPX(self,i, d,feedindex):\n\n # We store all the pointing information\n x = d['level1/spectrometer/pixel_pointing/pixel_ra'][feedindex,:][:,self.select_mask]\n x = x[:,0:self.datasizes[i]].flatten()\n y = d['level1/spectrometer/pixel_pointing/pixel_dec'][feedindex,:][:,self.select_mask]\n y = y[:,0:self.datasizes[i]].flatten()\n \n # convert to Galactic\n rot = hp.rotator.Rotator(coord=['C','G'])\n gb, gl = rot((90-y)*np.pi/180., x*np.pi/180.)\n\n pixels = hp.ang2pix(self.nside, gb, gl)\n return pixels",
"def process_scan(path):\n # Read scan\n volume = read_nifti_file(path)\n # Normalize\n volume = normalize(volume)\n # Resize width, height and depth\n volume = resize_volume(volume)\n return volume",
"def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open the image')",
"def scan(self):\n return",
"def __init__(self, inifile, dry_run, output):\n\n config = ConfigParser()\n config.read(inifile)\n sequence = config['dithersequence']\n\n # Set up the output.\n self._output = output\n\n # Set up the file type and exposure sequence.\n self._location = sequence['location']\n self._filetype = sequence['filetype']\n self._date = sequence['date']\n self._exposures = [int(e) for e in sequence['exposures'].split()]\n\n if 'coordinates' not in config:\n raise ValueError('no coordinates set for dither!')\n \n coords = config['coordinates']\n self._dithertype = coords['dithertype']\n \n self._wcs = fits.getdata(coords['wcsfile'], 2)\n self._wcs = self._wcs[np.argsort(self._wcs['mjd_obs'])]\n self._central_exposure = int(sequence['centralexposure'])\n\n if coords['dithertype'] == 'telescope':\n fadir = coords['fiberassigndir']\n self._ditherfa = fits.getdata(os.path.join(\n fadir, 'fiberassign-%s.fits' % coords['ditheredtilenum']))\n self._unditherfa = fits.getdata(os.path.join(\n fadir, 'fiberassign-%s.fits' % coords['unditheredtilenum']))\n expnum = [int(fn.split('-')[1]) for fn in self._wcs['filename']]\n centralind = expnum.index(self._central_exposure)\n self._central_wcs = self._wcs[centralind]\n\n # Set the Tile ID for the output metadata.\n self._tileid = coords['unditheredtilenum']\n else:\n raise ValueError('not implemented')\n\n # Extract the list of exposures on disk.\n self._exposure_files = self._getfilenames()\n\n if not dry_run:\n # Construct fiber output.\n self._exposure_table = self._buildtable()",
"def run(argv=sys.argv[1:]):\n clparser = argparse.ArgumentParser(description='Determine whether there' +\n ' are traces of helium in a given spectrum.')\n clparser.add_argument('-v', '--version', action='version',\n version='%(prog)s ' + __version__)\n clparser.add_argument('-a', '--plot-all', action='store_true',\n help='draw plot showing all the lines found in spectrum')\n clparser.add_argument('-p', '--plot', action='store_true',\n help='draw plot showing helium lines in spectrum')\n clparser.add_argument('filenames', nargs='+',\n help='spectrum files to process')\n clparser.add_argument('--verbose', action='store_true',\n help='verbose output (prints lines and signal to noise ratio)')\n clparser.add_argument('-t', '--threshold', nargs='?', type=float,\n const=1.0, default=1.0,\n help='a signal raises that many times above the background noise')\n args = clparser.parse_args(argv)\n\n for fname in args.filenames:\n find_helium(fname, plot=args.plot, plot_all=args.plot_all,\n verbose=args.verbose, threshold=args.threshold)",
"def main(_):\n print('argument to expand', ARGS.video_in)\n print('argument expanded', glob.glob(ARGS.video_in))\n video_count = 0\n for video_filename in glob.glob(ARGS.video_in):\n print('start parsing', video_filename)\n data = skvideo.io.ffprobe(video_filename)['video']\n rate_str = six.ensure_str(data['@r_frame_rate']).split('/')\n rate = float(rate_str[0]) / float(rate_str[1])\n print('detected frame rate:', rate)\n\n print('load frames:')\n video = skvideo.io.vreader(video_filename)\n frame_count = 0\n file_count = 0\n for frame in video:\n if (frame_count > ARGS.offset) and \\\n ((frame_count-ARGS.offset)%ARGS.skip == 0) and \\\n (frame_count/rate >= ARGS.from_s) and \\\n (frame_count/rate <= ARGS.to_s or ARGS.to_s == -1):\n print(frame_count,)\n img = Image.fromarray(frame)\n if ARGS.crop:\n img = crop(img, ARGS.size)\n # save file\n file_number = file_count + video_count * ARGS.multiple + ARGS.start\n if ARGS.format_ext.lower() == 'jpg':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.jpg'.format(file_number))\n img.save(file_out, 'JPEG')\n elif ARGS.format_ext.lower() == 'png':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.png'.format(file_number))\n img.save(file_out, 'PNG')\n else:\n print('unrecognize format', ARGS.format_ext)\n sys.exit()\n file_count += 1\n frame_count += 1\n video_count += 1",
"def scanForAssays(self):\n if not self.isDataDir():\n self.setDataDir(os.getcwdu())\n files = os.listdir(self.__datadir)\n print(files)\n for f in files:\n if f.endswith('.tif'):\n print('An TIFF image here! (%s)') % f",
"def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass",
"def retrieveDensity_reg(slidedir:str, filename : str, resultsdir : str, suffix : str = '_results_dirreg.npz'):\n TILESIZE_X = 512\n TILESIZE_Y = 512\n sl = openslide.open_slide(slidedir+os.sep+filename)\n\n tiles_total_x = int(np.floor(sl.dimensions[0] / TILESIZE_X))\n tiles_total_y = int(np.floor(sl.dimensions[1] / TILESIZE_Y))\n\n # calculate 10 HPFs with highest mitotic activity\n # 1 HPF = 0.237 mm^2 \n A = 2.37 # mm^2 \n W_hpf_microns = np.sqrt(A*4/3) * 1000 # in microns\n H_hpf_microns = np.sqrt(A*3/4) * 1000 # in microns\n\n micronsPerPixel = sl.properties[openslide.PROPERTY_NAME_MPP_X]\n\n W_hpf = int(W_hpf_microns / float(micronsPerPixel)) \n H_hpf = int(H_hpf_microns / float(micronsPerPixel))\n\n W_x = int(W_hpf / TILESIZE_X)\n W_y = int(H_hpf / TILESIZE_Y)\n\n f = np.load(bz2.BZ2File(resultsdir + os.sep + filename + suffix+'.bz2','rb'))\n \n\n scorefield=np.zeros((np.max(f['tilesProcessed'][:,1])+1,1+np.max(f['tilesProcessed'][:,0])))\n scorefield[f['tilesProcessed'][:,1],f['tilesProcessed'][:,0]] = np.reshape(f['scores'],-1)\n\n completeMap = scorefield\n\n kernel = np.ones((W_y,W_x),np.float32)\n ma = cv2.filter2D(completeMap, -1, kernel )\n\n return ma, completeMap",
"def main(folder, outputfile):\n parser = argument_parser()\n args = parser.parse_args()\n\n show_all = args.show_all\n verbose = args.verbose\n\n random.seed(args.rng_seed)\n\n args.files = folder\n print args.files\n\n try:\n image = Image.open(args.files[0])\n except IOError, msg:\n print >> sys.stderr, msg\n return 1\n if image.mode == 'P':\n image = image.convert('RGB')\n \n if image.size[0] > args.w:\n image = image.resize((args.w, int((float(args.w)/image.size[0]) *\n image.size[1])), Image.ANTIALIAS)\n\n if not show_all:\n def nothing(a, b):\n pass\n do_something = nothing\n elif args.saving:\n do_something = Imsave(\"saved/\" + args.files[0][:-4] + \"_\" +\n str(image.size[0]) + \"/\").save\n else:\n import im_debug\n do_something = im_debug.show\n\n if verbose:\n import time\n class Logger:\n def __init__(self):\n self.t = 0\n\n def __call__(self, m):\n t_n = time.time()\n if self.t > 0:\n print >> sys.stderr, \"\\t\" + str(t_n - self.t)\n print >> sys.stderr, m\n self.t = t_n\n logger = Logger()\n\n else:\n def logger(m):\n pass\n \n if args.manual_mode:\n import manual\n try:\n lines = manual.find_lines(image)\n except manual.UserQuitError:\n #TODO ask user to try again\n return 1\n else:\n if args.l_cache:\n filename = (\"saved/cache/\" + args.files[0][:-4] + \"_\" +\n str(image.size[0]))\n cache_dir = \"/\".join(filename.split('/')[:-1])\n if os.path.exists(filename):\n lines, l1, l2, bounds, hough = pickle.load(open(filename))\n print >> sys.stderr, \"using cached results\"\n else:\n lines, l1, l2, bounds, hough = linef.find_lines(image, do_something, logger)\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n d_file = open(filename, 'wb')\n pickle.dump((lines, l1, l2, bounds, hough), d_file)\n d_file.close()\n else:\n lines, l1, l2, bounds, hough = linef.find_lines(image, do_something, logger)\n\n grid, lines = gridf.find(lines, image.size, l1, l2, bounds, hough,\n show_all, do_something, logger)\n if show_all:\n im_g = image.copy()\n draw = ImageDraw.Draw(im_g)\n for l in grid[0] + grid[1]:\n draw.line(l, fill=(64, 255, 64), width=1)\n do_something(im_g, \"grid\", name=\"grid\")\n\n intersections = intrsc.b_intersects(image, lines, show_all, do_something, logger)\n board = intrsc.board(image, intersections, show_all, do_something, logger)\n\n logger(\"finished\")\n\n # TODO! refactor this mess:\n if len(args.files) == 1:\n\n if args.sgf_output:\n print board.asSGFsetPos()\n else:\n print board\n \n else:\n game = output.Game(19, board) #TODO size parameter\n #for f in args.files[1:]:\n for i, f in enumerate(args.files):\n try:\n image = Image.open(f)\n except IOError, msg:\n print >> sys.stderr, msg\n continue\n if verbose:\n print >> sys.stderr, \"Opening\", f\n if image.mode == 'P':\n image = image.convert('RGB')\n if image.size[0] > args.w:\n image = image.resize((args.w, int((float(args.w)/image.size[0]) *\n image.size[1])), Image.ANTIALIAS)\n board = intrsc.board(image, intersections, show_all, do_something, logger)\n if args.sgf_output:\n game.addMove(board)\n else:\n with open(outputfile + str(i) + \".txt\", \"w\") as f:\n f.write(str(board))\n\n if args.sgf_output:\n print game.asSGF()\n\n return 0",
"def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n _logger.debug(\"Starting crazy calculations...\")\n createlookuptable(args.imagefolder)\n _logger.info(\"Script ends here\")"
]
| [
"0.7093423",
"0.66149205",
"0.6384764",
"0.6182628",
"0.5557666",
"0.55530584",
"0.5510526",
"0.54714817",
"0.54650265",
"0.5442735",
"0.54236215",
"0.5420005",
"0.54110485",
"0.5377501",
"0.53124654",
"0.530529",
"0.5301755",
"0.5270496",
"0.5258963",
"0.52395946",
"0.5237774",
"0.5207234",
"0.52047575",
"0.52042335",
"0.5200438",
"0.5157077",
"0.5128676",
"0.51017404",
"0.5089804",
"0.5089523"
]
| 0.8213629 | 0 |
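The zscan entry above drives a single HEALPix pixel run: it sets `config.d.hpix`, `nside`, and a per-pixel `outbase` of the form `'%s_%d_%05d'`, then skips the run if the output file already exists. A minimal, self-contained sketch of those two conventions follows; the helper names are illustrative and not part of the entry.

# Illustrative helpers (not part of the original entry) reproducing the
# per-pixel conventions from the run() method above.
import os

def pixel_outbase(outbase, nside, pixel):
    """Per-pixel outbase, matching '%s_%d_%05d' % (outbase, nside, pixel) in run()."""
    return '%s_%d_%05d' % (outbase, nside, pixel)

def needs_run(expected_filename):
    """Mirror of the 'if not os.path.isfile(runzscan.filename)' guard in run()."""
    return not os.path.isfile(expected_filename)

print(pixel_outbase('zscan', 8, 42))  # -> 'zscan_8_00042'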
Load df to TD Table | def load_td_table(tab_df_list, if_exists='append'):
try:
dest_table, dataframe, client = tab_df_list
if dataframe.empty:
print(f'Table {dest_table} has no new data to load...')
else:
# Replace NaN values with empty strings before loading to TD
dataframe = dataframe.where(pd.notnull(dataframe), '')
dest_table = dest_table.lower()
client.load_table_from_dataframe(dataframe, dest_table, if_exists=if_exists)
print('Rows: ', str(len(dataframe)), ' are ', if_exists, ' in ', dest_table, ' successfully...')
return None
except Exception as e:
print('Exception in load_td_table(): ', str(e))
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_table_to_tracy_file(latname: str, df: pd.DataFrame, filename: str) -> None:\n save_string(parse_table_to_tracy_string(latname, df), filename)",
"def __load_gt(df_table, df_idx, p_csv):\n idx_start = df_idx\n with open(p_csv, 'rb') as f_csv:\n for line in f_csv:\n # decoding french letters and mapping them to UTF-8\n str_line = line.decode('utf-8', 'replace').strip()\n # gt annotations may end with empty lines\n if str_line == '':\n continue\n # parsing line\n img_path, transcript = str_line.split(self.sep)\n img_path = img_path.encode('ascii', 'ignore').decode('ascii', 'ignore')\n # parsing french to ASCII\n transcript = transcript#.lower()\n transcript = self.to_ascii(transcript)\n # determine img path\n img_path = os.path.join(self.imgs_path, img_path)\n # filling table\n # if self.lower_case:\n # transcript = transcript.lower()\n if not os.path.isfile(img_path):\n pass\n # raise FileNotFoundError('No image exists at {}'.format(img_path))\n warnings.warn('No image exists at {}'.format(img_path))\n continue\n if not self.lazy_loading:\n self.form_imgs[img_path] = np.array(Image.open(img_path)).astype(np.uint8)\n # storing relevant data\n df_table['transcription'].append(transcript)\n df_table['form_path'].append(img_path)\n df_table['word_id'].append(df_idx)\n df_table['bbox'].append([])\n df_idx += 1\n return df_table, df_idx, list(range(idx_start, df_idx))",
"def _pdread2astrotable(csvgzdir):\n df = pd.read_csv(csvgzdir)\n tb = Table.from_pandas(df)\n return tb",
"def load_table(**kargs):\n from transformer import dehyphenate\n sep = LoincMTRT.delimit # kargs.get('sep', ',')\n input_dir = kargs.get('input_dir', 'data')\n dehyphen = kargs.get('dehyphenate', True)\n deq = kargs.get('dequote', True)\n one_to_one = kargs.get('one_to_one', True)\n\n df = dp.load_generic(input_file=LoincMTRT.table, sep=sep, input_dir=input_dir) \n if dehyphen: \n df = dehyphenate(df, col=LoincMTRT.col_key) # inplace\n # 12345-7 or 123457 \n df = df.drop_duplicates(keep='last') # drop duplicates\n\n if deq: \n df = dequote(df, col=LoincMTRT.col_value)\n\n if one_to_one: \n df = LoincMTRT.resolve_duplicates(df, verbose=1)\n\n return df",
"def write_tde(table_df, tde_fullpath, arg_append):\n if arg_append and not os.path.isfile(tde_fullpath):\n print \"Couldn't append -- file doesn't exist\"\n arg_append = False\n\n # Remove it if already exists\n if not arg_append and os.path.exists(tde_fullpath):\n os.remove(tde_fullpath)\n tdefile = tde.Extract(tde_fullpath)\n\n # define the table definition\n table_def = tde.TableDefinition()\n \n # create a list of column names\n colnames = table_df.columns\n # create a list of column types\n coltypes = table_df.dtypes\n\n # for each column, add the appropriate info the Table Definition\n for col_idx in range(0, len(colnames)):\n cname = colnames[col_idx]\n ctype = fieldMap[str(coltypes[col_idx])]\n table_def.addColumn(cname, ctype) \n\n # create the extract from the Table Definition\n if arg_append:\n tde_table = tdefile.openTable('Extract')\n else:\n tde_table = tdefile.addTable('Extract', table_def)\n row = tde.Row(table_def)\n\n for r in range(0, table_df.shape[0]):\n for c in range(0, len(coltypes)):\n if str(coltypes[c]) == 'float64':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'float32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'int64':\n row.setDouble(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'int32':\n row.setDouble(c, table_df.iloc[r,c])\n elif str(coltypes[c]) == 'object':\n row.setString(c, table_df.iloc[r,c]) \n elif str(coltypes[c]) == 'bool':\n row.setBoolean(c, table_df.iloc[r,c])\n else:\n row.setNull(c)\n # insert the row\n tde_table.insert(row)\n\n tdefile.close()\n print \"Wrote %d lines to %s\" % (len(table_df), tde_fullpath)",
"def _load_table(table: Model, directory: Path, format_: str):\n\n if directory is not None:\n print(f\" Loading {table.table_name()}...\")\n in_file = Path(directory) / f\"{table.table_name()}.{format_}\"\n dataset = tablib.Dataset(headers=table.fields()).load(in_file.read_text())\n print(f\" Importing {table.table_name()} into the database...\")\n table.insert_many(dataset.dict).execute()\n print(\" Done.\")\n print(\"=====================\")\n else:\n pass\n # print(dataset.export(\"csv\"))",
"def data_table(\n filepath=\"sparkify_data.csv\",\n title=\"Engineered Features Dataframe\",\n ):\n df = read_data_csv(filepath)\n fig = go.Figure(\n data=[\n go.Table(\n header=dict(\n values=list(df.columns), align=\"left\"\n ),\n cells=dict(\n values=[df[col] for col in df.columns],\n align=\"left\",\n ),\n )\n ]\n )\n\n fig.update_layout(title=go.layout.Title(text=title, x=0.5))\n\n return fig",
"def load_main_table(table_text):\n\n lines = table_text.split('\\n')\n i = 1\n cols = []\n for thing in lines[1].split('\",\"'):\n if thing in ['C ', 'I ', 'K ', 'E ', 'H ']:\n cols.append(thing.strip() + str(i) + ' ')\n if thing == 'H ':\n i += 1\n else:\n cols.append(thing)\n lines[1] = '\",\"'.join(cols)\n text = \"\\n\".join(lines[1:])\n df = pd.read_csv(StringIO(text))\n df.index = df['Student ID']\n\n return df",
"def df_to_table(slide, df, left, top, width, height, colnames=None):\n rows, cols = df.shape\n #print('rows=',rows,'cols=',cols)\n res = slide.shapes.add_table(rows + 1, cols, left, top, width, height)\n\n if colnames is None:\n colnames = list(df.columns)\n\n # Insert the column names\n for col_index, col_name in enumerate(colnames):\n # Column names can be tuples\n if not isinstance(col_name, str):\n col_name = \" \".join(col_name)\n res.table.cell(0, col_index).text = col_name\n paragraph = res.table.cell(0, col_index).text_frame.paragraphs[0]\n paragraph.font.size = Pt(15)\n paragraph.alignment = PP_ALIGN.CENTER\n res.table.cell(0, col_index).fill.solid()\n res.table.cell(0, col_index).fill.fore_color.rgb = RGBColor(255,100,0)\n #print(col_name)\n\n m = df.to_numpy()\n #print('m numpy array:',m)\n\n\n for row in range(rows):\n for col in range(cols):\n val = m[row, col]\n text = str(val)\n res.table.cell(row + 1, col).text = text\n paragraph = res.table.cell(row+1, col).text_frame.paragraphs[0]\n paragraph.font.size = Pt(12)\n paragraph.font.color.rgb = RGBColor(0, 0, 0) # use black color for now \n res.table.cell(row+1, col).fill.background()",
"def __create_data_frame(self, soup):\n self.__data_frame = pd.read_html(str(soup))[0]\n timestamp = self.__navigate_rows(soup)\n # rename dataframe columns by columns name in sqlite\n self.__data_frame = self.__data_frame.rename(\n columns=self.__columns_name)\n self.__data_frame['time'] = pd.Series(timestamp)\n self.__data_frame['chg_perc'] = self.__data_frame['chg_perc'].\\\n str.replace('%', '')\n self.__data_frame['created_date'] = datetime.now()\n # save_file(self.__name_file, self.__data_frame.to_string())",
"def df():\n fs.df()",
"def test_create_from_dataframe(self):\n self.insert()\n data = self.tbl.select()\n data.index.name = None\n tbl = Table.create(':memory:', \"Foo_2\", data, verbose=True,\n primary_key='id', autoincrement=True)\n self.check(self.idata, tbl.select())",
"def data_table_low(\n filepath=\"sparkify_data.csv\",\n title=\"Engineered Features Dataframe\",\n ):\n df = read_data_csv_low(filepath)\n fig = go.Figure(\n data=[\n go.Table(\n header=dict(\n values=list(df.columns), align=\"left\"\n ),\n cells=dict(\n values=[df[col] for col in df.columns],\n align=\"left\",\n ),\n )\n ]\n )\n\n fig.update_layout(title=go.layout.Title(text=title, x=0.5))\n\n return fig",
"def create_tsv(df, filename=None):\n table = df.to_string()\n lines = table.splitlines()\n index_name = lines.pop(1).strip()\n lines[0] = index_name + lines[0][len(index_name):]\n table = '\\n'.join(lines)\n if filename is not None:\n with open(filename, 'w') as f:\n f.write(table)\n else:\n return table",
"def load_raw_table(conf, table):\n confrd = load_config_raw_data(conf)\n path_table = Path(confrd[table][\"path\"])\n sep = confrd[table][\"sep\"]\n encoding = confrd[table][\"encoding\"]\n df = pd.read_csv(path_table, sep=sep, encoding=encoding)\n return df",
"def _read_table(hdulist, extname, **kwargs):\n t = _read_ext(Table, hdulist, extname, **kwargs)\n h = hdulist[extname].header\n for i in range(h['TFIELDS']):\n try:\n t.columns[i].unit = h['TUNIT%d' % (i + 1)]\n except Exception:\n pass\n return t",
"def update_table(dd_value, df):\n df = pd.read_json(df, orient=\"split\")\n return summary_table_tmp_rh_tab(df, dd_value)",
"def load_file(self):\n\n self.df = self.sqlContext.read.csv(self.source, sep=self.sep, header=True, inferSchema=True)",
"def load_data(self):\n df= self.read_file()\n for row,col in df.iterrows():\n Employeeid = int(col['Empolyeeid'])\n Employee_Name = col['Employee_Name']\n Age = col['Age']\n Salary = col['Salary']\n self.table.put_item(\n Item={\n \"Employeeid\":Employeeid,\n \"Employee_Name\": Employee_Name,\n \"Age\": Age,\n \"Salary\": Salary\n }\n )\n return True",
"def load_up_initial_db(self, date_dict):\n df_tot = []\n for chunk in pd.read_sql_table(self.table, self.disk_engine, chunksize=10000, parse_dates=date_dict):\n df_tot.append(chunk)\n self.df = pd.concat(df_tot)",
"def sourceToDataframe(self):\n df = pd.read_excel(self.filename)\n df.columns = df.iloc[10]\n df = df.drop(df.index[:11])\n self.df = df #makes this df accessible to the whole class now\n self.insertODN()\n display(df.head())",
"def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df",
"def store_partial_df(df, table_name):\r\n cursor = hana.cursor()\r\n pbar = tqdm(total=len(df.index))\r\n\r\n for index, row in df.iterrows():\r\n pbar.update(1)\r\n statement = 'INSERT INTO \\\"NIKOLAI\\\".\\\"'+table_name+'\\\" ('\r\n for colname in map(str, row.index.tolist()):\r\n statement += '\\\"'+ colname + '\\\",'\r\n statement = statement[:-1] +') VALUES ('\r\n #for value in map(str, row.tolist()):\r\n for value in row.tolist():\r\n if value != value:\r\n statement += 'null,'\r\n elif isinstance(value, int) or isinstance(value, float):\r\n statement += str(value) + ','\r\n else:\r\n statement += '\\''+ str(value) + '\\','\r\n\r\n cursor.execute(statement[:-1] +');')\r\n\r\n pbar.close()\r\n hana.commit()",
"def render(cls, df: DataFrame, *args, **kwargs):\n from labext.widgets.data_table import DataTable\n dt = DataTable(df, *args, **kwargs)\n display(dt.widget, *dt.get_auxiliary_components())",
"def df2db_separate(self, df: pd.DataFrame, tab_name):\n self.execute(\"set hive.execution.engine = tez\")\n self.execute(\"set tez.queue.name = sephora_internal\")\n self.execute(\"drop table if exists {table_name}\".format(table_name=tab_name))\n\n max_df_size = 50000\n\n dfs = df_split(df, batch_size=max_df_size)\n num_piece = len(dfs)\n\n dfs[0].to_sql(tab_name, self.engine, method='multi', index=False)\n if num_piece > 1:\n for pdf in dfs[1:]:\n self.execute(\"DROP TABLE IF EXISTS {tt}\".format(tt=tab_name + '_tmp'))\n pdf.to_sql(tab_name + '_tmp', self.engine, method='multi', index=False)\n self.execute(\"INSERT INTO TABLE {tn} SELECT * FROM {tt}\".format(\n tn=tab_name, tt=tab_name + '_tmp'\n ))\n print(len(pdf))\n self.execute(\"DROP TABLE IF EXISTS {tt}\".format(tt=tab_name + '_tmp'))",
"def upload_table(self, dataframe, columns_with_files=None,\n columns_with_images=None):\n\n upload_table(self, self.page_id, dataframe,\n columns_with_files, columns_with_images)",
"def add_to_database(self, df):\n \n from sqlalchemy import create_engine\n \n engine = create_engine(\"mysql://dublinbus:somepaawsord/researchpracticum\")\n con = engine.connect()\n df.to_sql(con=con, name='TimeTables', if_exists='append')\n con.close()",
"def table_to_df(db_name, table_name):\n return sqlContext.table(\"{0}.{1}\".format(db_name, table_name))",
"def load_table(self, db_name, table_name, **kwargs):\n\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Check if table exists and read\n if engine.dialect.has_table(engine, table_name):\n sql = 'SELECT * FROM %s' % table_name\n\n # Prevent duplicate keys\n kwargs.pop(\"sql\", None)\n kwargs.pop(\"con\", None)\n kwargs.pop(\"coerce_float\", None)\n\n result = pd.read_sql(sql=sql, con=connection, coerce_float=True, **kwargs)\n else:\n print(table_name, \"does not exist\")\n result = None\n\n # Close connection\n connection.close()\n\n return result",
"def load_data(path=None, dbtable=None, headers=None):\n\n DF = dd.read_csv(\n urlpath=path,\n names=headers,\n dtype='unicode')\n\n dd.to_sql(\n DF,\n name=dbtable,\n uri=data_store,\n if_exists='append',\n index=False\n )"
]
| [
"0.6317331",
"0.62949616",
"0.62701654",
"0.6240764",
"0.6213843",
"0.6152276",
"0.61426777",
"0.6128492",
"0.61188745",
"0.61170876",
"0.6106664",
"0.6071195",
"0.6043488",
"0.60277236",
"0.59732497",
"0.59642226",
"0.5963602",
"0.58879924",
"0.5873924",
"0.587109",
"0.5862338",
"0.58467096",
"0.58457196",
"0.5840316",
"0.5835797",
"0.5830976",
"0.58068246",
"0.5793262",
"0.5783804",
"0.57698166"
]
| 0.6952426 | 0 |
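A minimal call sketch for the load_td_table snippet above, assuming the function is in scope and that `client` is a Treasure Data client object exposing load_table_from_dataframe (pytd is used here only as an illustration; the API key, database, and table name are placeholders, not part of the source):

```python
import pandas as pd
import pytd  # illustrative TD client; any object with load_table_from_dataframe works

# Placeholder credentials -- substitute real values.
client = pytd.Client(apikey="TD_API_KEY", database="sandbox")

df = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})

# load_td_table (defined in the row above) lower-cases the table name,
# blanks out NaN values, and appends the rows to the destination table.
load_td_table(("MY_TABLE", df, client), if_exists="append")
```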
From complete_routes.txt generated by fetch_mbta_routes, outputs list of possible mbta route_ids | def mbta_route_list():
f = open('complete_routes.txt', 'r')
complete_routes = ast.literal_eval(f.read())
#creates list of all route_ids in MBTA system
subway_route_list = []
for x in range(len(complete_routes['mode'])):
if complete_routes['mode'][x]['mode_name'] == 'Subway':
for y in range(len(complete_routes['mode'][x]['route'])):
subway_route_list.append(complete_routes['mode'][x]['route'][y]['route_id'])
#removes duplicates from list and returns
return list(OrderedDict.fromkeys(subway_route_list)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_routes():\n\n return Db().get_line_ids()",
"def bus_routes_direction():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file) #useful for monitoring progress of function\n reader = csv.reader(open(file))\n for line in reader:\n route = extract_route_and_direction(line[3]) # Journey ID field\n if route not in route_list and route != \"\": # error handling for extract_bus_routes function\n route_list.append(route)\n return route_list",
"def bus_routes():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file)\n reader = csv.reader(open(file))\n for line in reader:\n route=extract_bus_route(line[3]) #Journey ID field\n if route not in route_list and route!=\"\": #error handling for extract_bus_routes function\n route_list.append(route)\n return route_list",
"def routes(self) -> List[Tuple[int, bytes]]:\n raise NotImplementedError() # pragma: no cover",
"def lookup_routes(self, daddr):\n outroutes = []\n for entry in self.routes:\n for varat in entry[\"varats\"]:\n ip = varat[\"network\"].split(\".\")\n netmask = varat[\"netmask\"].split(\".\")\n\n mask_bit = \"\".join([ format(int(quad), \"08b\") for quad in netmask ])\n num_ones = mask_bit.count(\"1\")\n ip_bin = \"\".join([ format(int(quad), \"08b\") for quad in ip ])\n ip_start = ip_bin[:num_ones]\n daddr_bin = \"\".join([ format(int(quad), \"08b\") for quad in daddr.split(\".\") ])\n if daddr_bin.startswith(ip_start):\n outroutes.append({\"peer\": entry[\"peer\"], \"us\": entry[\"us\"], \"ghoti\": num_ones, \"msg\": varat})\n\n #print(\"outroutessssssssssssssssssssss\", outroutes)\n return outroutes",
"def available_routes():\r\n return (\r\n f\"Available Routes:<br/>\"\r\n f\"/api/v1.0/precipitation<br/>\"\r\n f\"/api/v1.0/stations<br/>\"\r\n f\"/api/v1.0/tobs<br/>\"\r\n f\"/api/v1.0/start<br/>\"\r\n f\"/api/v1.0/start/end\"\r\n )",
"def lookup_routes(self, daddr):\n outroutes = []\n for entry in self.routes:\n # split netmask and daddr by the IP dots\n netmask_split = entry[NMSK].split('.')\n daddr_split = daddr.split('.')\n\n # bitwise ANd the netmask with the daddr\n result = []\n for i in range(0, len(netmask_split)):\n result.append(str(int(netmask_split[i]) & int(daddr_split[i])))\n \n # compare ANDed result to the network\n is_valid = True\n network_split = entry[NTWK].split('.')\n for i in range(0, len(network_split)):\n if result[i] != network_split[i]:\n is_valid = False\n break\n if is_valid:\n outroutes.append(entry)\n\n if len(outroutes) == 0:\n return outroutes\n\n # reform IP address\n outroutes.sort(key=lambda r: int(r[NMSK].replace('.', '')), reverse=True)\n longest_matching_prefix = int(outroutes[0][NMSK].replace('.', ''))\n outroutes = list(filter(lambda r: int(r[NMSK].replace('.', '')) == longest_matching_prefix, outroutes))\n return outroutes",
"def print_routes() -> None:\n mbta = MBTA(config.CT_MBTA_API_KEY)\n routes = mbta.get_routes()\n title_text = f\"List of Routes on MBTA\"\n print(f\"{title_text:=^80}\")\n for route in routes:\n print(\n f\"ID: {route['id']}, NAME: {route['attributes']['long_name']}\"\n )\n return",
"def get_routes_timetable():\n\n return Db().get_line_ids()",
"def get_all_cab_routes(self):\n cab_routes = self.admin_repository.get_all_routes()\n if cab_routes:\n for cab_route in cab_routes:\n print(\"\\nId : {}\".format(cab_route[0]))\n print(\"Cab Number : {}\".format(cab_route[1]))\n print(\"Route Id : {}\".format(cab_route[2]))\n print(\"Stop Name : {}\".format(cab_route[3]))\n print(\"Stop stage : {}\".format(cab_route[4]))\n print(\"Timings : {}\".format(cab_route[5]))\n print(\"----------------------------\")\n return True\n else:\n print(\"Data Empty/Not Found.\")\n return False",
"def routes():\n routeList = []\n for profile in globalProfile():\n routeList.append(profile.route)\n return routeList",
"def lookup_routes(self, daddr):\n outroutes = []\n binary_of_dest = self.ip_to_binary(daddr)\n best_cidr = float('-inf')\n\n for r in self.routes:\n # convert network and netmask to binary for longest prefix matching\n binary_of_network = self.ip_to_binary(r[MESG][NTWK])\n cidr_of_netmask = self.ip_to_binary(r[MESG][NMSK]).count('1')\n # use subnet mask to get the prefix\n dst = binary_of_dest[:cidr_of_netmask]\n ntk = binary_of_network[:cidr_of_netmask]\n # matching prefixes?\n if dst == ntk:\n # found better match. clear and start over with just this route\n if best_cidr < cidr_of_netmask:\n best_cidr = cidr_of_netmask\n outroutes.clear()\n outroutes.append(r)\n # 1 to 1 match, add route to list\n if best_cidr == cidr_of_netmask:\n outroutes.append(r)\n\n return outroutes",
"def longest_prefix_match(self, routes):\n outroutes = []\n longest_prefix = 0\n for route in routes:\n bin_nmsk = '.'.join([bin(int(x)+256)[3:] for x in route[NMSK].split('.')])\n curr = self.len_pref(bin_nmsk)\n if curr > longest_prefix:\n outroutes = [route]\n longest_prefix = curr\n elif curr == longest_prefix:\n outroutes.append(route)\n return outroutes",
"def enumerate_routes(route_list, start=1):\r\n for route in route_list:\r\n yield f\"route{start}\", route\r\n start += 1",
"def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', '41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', '49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops",
"def get_routes():\n # get from cache if it exists\n routes = cache.get(\"routes\")\n if routes:\n return routes\n\n trips_url = \"https://data.edmonton.ca/api/views/ctwr-tvrd/rows.json?accessType=DOWNLOAD\"\n bus_heading_url = \"https://data.edmonton.ca/resource/atvz-ppyb.json\"\n\n trips_response = requests.get(trips_url)\n bus_heading_response = requests.get(bus_heading_url)\n\n if trips_response.status_code == 200 and bus_heading_response.status_code == 200:\n trips = trips_response.json()\n headings = bus_heading_response.json()\n\n bus_to_headings = {}\n trip_to_bus = {}\n\n for heading in headings:\n if \"route_long_name\" in heading:\n bus_to_headings[heading[\"route_id\"]] = heading[\"route_long_name\"]\n\n for item in trips[\"data\"]:\n trip_id = item[-4]\n bus_number = item[-6]\n if bus_number in bus_to_headings:\n bus_heading = bus_to_headings[bus_number]\n trip_to_bus[trip_id] = [bus_number, bus_heading]\n \n # store the routes in the cache for five minutes\n cache.set(\"routes\", trip_to_bus, timeout=5*60) \n return trip_to_bus",
"def get_partial(routes):\n sequences = defaultdict(Sequence)\n routes.sort(key=attrgetter('finish'))\n count = 0\n\n # Extract start and finish points\n start = [f.start for f in routes]\n finish = [f.finish for f in routes]\n\n # Initial conditions\n sequences[0] = Sequence(0, 0, set())\n sequences[1] = Sequence(routes[0].weight, routes[0].finish, (routes[0],))\n\n # Find the best sequences\n for i in range(2, len(routes) + 1):\n new_seq = sequences[bisect_left(finish, start[i - 1])] + routes[i - 1]\n sequences[i] = max(sequences[i - 1], new_seq)\n\n # Print the best sequences\n if not debug:\n for reg in sequences[len(routes)].regs:\n print(reg.line, end='')\n if debug:\n for reg in sequences[len(routes)].regs:\n count += 1\n\n\n return sequences[len(routes)].weight, count",
"def gtfs_routes(gtfs, output_f):\n\n\t# Load up the stop times so we can find which are the best routes.\n\t#TODO\n\tstop_times_file = [x for x in gtfs.namelist() if 'stop_times' in x][0]\n\n\tstoptimes_c = csv.reader((gtfs.open(stop_times_file, 'r')))\n\theader = stoptimes_c.next()\n\ttrip_id_col = header.index('trip_id')\n\tarrtime_col = header.index('arrival_time')\n\tdeptime_col = header.index('departure_time')\n\tstopseq_col = header.index('stop_sequence')\n\ttrip_times = {}\n\tfor row in stoptimes_c:\n\t\tif row[trip_id_col] not in trip_times:\n\t\t\t# earliest seq, latest seq, earliest seq dep time, latest seq dep time\n\t\t\ttrip_times[row[trip_id_col]] = [None, None, None, None]\n\n\t\tarrtime = time_as_timedelta(row[arrtime_col])\n\t\tdeptime = time_as_timedelta(row[deptime_col])\n\t\tif arrtime is None or deptime is None:\n\t\t\t# bad data, skip!\n\t\t\tcontinue\n\t\tseq = int(row[stopseq_col])\n\n\t\t# Find if this is an earlier item in the sequence\n\t\tif trip_times[row[trip_id_col]][0] is None or trip_times[row[trip_id_col]][0] > seq:\n\t\t\ttrip_times[row[trip_id_col]][0] = seq\n\t\t\ttrip_times[row[trip_id_col]][2] = deptime\n\n\t\t# Find if this is an later item in the sequence\n\t\tif trip_times[row[trip_id_col]][1] is None or trip_times[row[trip_id_col]][1] < seq:\n\t\t\ttrip_times[row[trip_id_col]][1] = seq\n\t\t\ttrip_times[row[trip_id_col]][3] = arrtime\n\n\t# Load the shapes into a map that we can lookup.\n\t# We should do all the geometry processing here so that we only have to do\n\t# this once-off.\n\t#TODO\n\tshapes_file = [x for x in gtfs.namelist() if 'shapes' in x][0]\n\tshapes_c = csv.reader(swallow_windows_unicode(gtfs.open(shapes_file, 'r')))\n\n\theader = shapes_c.next()\n\tshape_id_col = header.index('shape_id')\n\tshape_lat_col = header.index('shape_pt_lat')\n\tshape_lng_col = header.index('shape_pt_lon')\n\tshape_seq_col = header.index('shape_pt_sequence')\n\tshape_dist_col = header.index('shape_dist_traveled') if 'shape_dist_traveled' in header else None\n\n\tshapes = {}\n\tshape_lengths = {}\n\tfor row in shapes_c:\n\t\tif row[shape_id_col] not in shapes:\n\t\t\tshapes[row[shape_id_col]] = {}\n\n\t\tshapes[row[shape_id_col]][int(row[shape_seq_col])] = (Decimal(row[shape_lng_col]), Decimal(row[shape_lat_col]))\n\n\t\t# Calculate length according to GTFS\n\t\t# This could also be calculated by the geometry, but we trust GTFS, right...\n\t\tif shape_dist_col is not None and row[shape_dist_col]:\n\t\t\tlength = Decimal(row[shape_dist_col])\n\t\t\tif row[shape_id_col] not in shape_lengths or shape_lengths[row[shape_id_col]] < length:\n\t\t\t\tshape_lengths[row[shape_id_col]] = length\n\n\t# translate the shapes into a LineString for use by the GeoJSON module\n\tfor shape_id in shapes.iterkeys():\n\t\tshape_keys = shapes[shape_id].keys()\n\t\tshape_keys.sort()\n\t\tshape = []\n\t\tfor ordinal in shape_keys:\n\t\t\tshape.append(shapes[shape_id][ordinal])\n\n\t\tshapes[shape_id] = shape\n\n\t# Make a matching dict between routes and shapes\n\ttrips = {}\n\ttrips_ref = {}\n\troute_time = {}\n\n\t#TODO\n\ttrips_file = [x for x in gtfs.namelist() if 'trips' in x][0]\n\n\ttrips_c = csv.reader(swallow_windows_unicode(gtfs.open(trips_file, 'r')))\n\theader = trips_c.next()\n\troute_id_col = header.index('route_id')\n\tshape_id_col = header.index('shape_id')\n\ttrip_id_col = header.index('trip_id')\n\tfor row in trips_c:\n\t\t# reference count the shapes\n\t\tif row[route_id_col] not in trips_ref:\n\t\t\t# route is unknown, create 
dict\n\t\t\ttrips_ref[row[route_id_col]] = {}\n\t\t\troute_time[row[route_id_col]] = trip_times[row[trip_id_col]]\n\n\t\tif row[shape_id_col] not in trips_ref[row[route_id_col]]:\n\t\t\t# shape is unknown, create counter\n\t\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] = 0\n\n\t\t# increment counter\n\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] += 1\n\n\t# now we're done, iterate through the reference-counters and find the best\n\t# shape\n\tfor route_id, candidate_shapes in trips_ref.iteritems():\n\t\tpopular_shape, popular_shape_refs = None, 0\n\t\tfor shape_id, refs in candidate_shapes.iteritems():\n\t\t\tif refs > popular_shape_refs:\n\t\t\t\tpopular_shape, popular_shape_refs = shape_id, refs\n\n\t\t# now we should have the route's shape\n\t\tassert popular_shape is not None, 'Couldn\\'t find a shape for route %r' % route_id\n\t\ttrips[route_id] = popular_shape\n\n\t# Cleanup unused variables\n\tdel trip_times\n\n\t# lets setup our output file\n\toutput_layer = geojson.FeatureCollection([])\n\t# assume WGS84 CRS\n\toutput_layer.crs = geojson.crs.Named('urn:ogc:def:crs:OGC:1.3:CRS84')\n\n\t# now we have all the shapes available, translate the routes\n\t#TODO\n\troutes_file = [x for x in gtfs.namelist() if 'routes' in x][0]\n\n\troutes_c = csv.reader(swallow_windows_unicode(gtfs.open(routes_file, 'r')))\n\theader = routes_c.next()\n\troute_id_col = header.index('route_id')\n\n\tfor row in routes_c:\n\t\t# make dict of other properties\n\t\tprops = dict()\n\t\tfor i, h in enumerate(header):\n\t\t\tif row[i] != '':\n\t\t\t\tprops[h] = row[i]\n\n\t\tif row[route_id_col] not in trips:\n\t\t\t# Route has no trips!\n\t\t\tprint \"Warning: route has no trips, skipping: %r\" % (row,)\n\t\t\tcontinue\n\n\t\tprops['shape_id'] = trips[row[route_id_col]]\n\t\tprops['shape_refs'] = trips_ref[row[route_id_col]][props['shape_id']]\n\t\tif shape_dist_col is not None and len(shape_lengths) > 0:\n\t\t\tprops['shape_length'] = shape_lengths[props['shape_id']]\n\t\tprops['duration_sec'] = (route_time[row[route_id_col]][3] - route_time[row[route_id_col]][2]).total_seconds()\n\n\t\toutput_layer.features.append(geojson.Feature(\n\t\t\tgeometry=geojson.LineString(\n\t\t\t\tcoordinates=shapes[trips[row[route_id_col]]]\n\t\t\t),\n\t\t\tproperties=props,\n\t\t\tid=row[route_id_col]\n\t\t))\n\n\t# now flush the GeoJSON layer to a file.\n\tgeojson.dump(output_layer, output_f, cls=DecimalEncoder)",
"def parse_routes_file(route_filename):\n\n list_route_descriptions = []\n tree = ET.parse(route_filename)\n for route in tree.iter(\"route\"):\n route_town = route.attrib['map']\n route_id = route.attrib['id']\n waypoint_list = [] # the list of waypoints that can be found on this route\n for waypoint in route.iter('waypoint'):\n waypoint_list.append(waypoint) # Waypoints is basically a list of XML nodes\n\n list_route_descriptions.append({\n 'id': route_id,\n 'town_name': route_town,\n 'trajectory': waypoint_list\n })\n\n return list_route_descriptions",
"def get_direction_bound_routes(direction):\n\n direction_bound_routes = []\n trains = [1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 19, 20]\n\n for track in trains:\n parameters = {'cmd': \"routeinfo\", 'key': BART_API_KEY, 'route': track, 'date': \"now\", 'json': \"y\"}\n response = requests.get('http://api.bart.gov/api/route.aspx', params=parameters)\n route = json.loads(response.text)[\"root\"][\"routes\"][\"route\"]\n routeDirection = route[\"direction\"]\n routeID = route[\"routeID\"]\n if routeDirection == direction:\n direction_bound_routes.append(routeID)\n\n return direction_bound_routes",
"def buildRouteLinkSequence(self):\n arcpy.env.workspace = PublicTransit.RTD_PATH\n linkSeq = arcpy.SearchCursor(PublicTransit.BUS_ROUTE_TRAVERSAL_EDGES, \"\", \"\", \"\", \"RouteId A; Cumul_Distance A\")\n prevRouteId = -1\n for e in linkSeq:\n if (e.RouteId in self.routeXref):\n routePattern = self.routeXref[e.RouteId]\n if (routePattern in self.transitRoutes): #not all routes are in RTD, so check\n if (prevRouteId != e.RouteId):\n self.transitRoutes[routePattern].linkSequence = []\n self.transitRoutes[routePattern].linkSequence.append(e.SourceOID)\n prevRouteId = e.RouteId\n del e\n del linkSeq",
"def filter_relationships(self, srcip, routes):\n outroutes = []\n return outroutes",
"def stops_on_routes():\n routes = ['15', '46A', '14', '41B', '39A', '65', '40D', '11', '31', '27', '67', '79', '42', '66A', '33B', '140', '44', '83A', '27B', '38', '16C', '747', '41C', '39', '25', '239', '43', '70', '13', '150', '145', '77A', '184', '84', '61', '83', '40', '66', '15A', '123', '17A', '16', '14C', '9', '4', '37', '32', '33', '49', '56A', '151', '25A', '45A', '54A', '47', '18', '7', '17', '102', '120', '65B', '41', '122', '29A', '76', '68', '59', '25B', '69', '27A', '66B', '38B', '7D', '75', '15B', '84A', '63', '84X', '33X', '68A', '1', '76A', '7B', '270', '236', '130', '238', '220', '44B', '40B', '26', '32B', '8', '41A', '53', '67X', '104', '32A', '79A', '114', '185', '66X', '31B', '32X', '51X', '51D', '41X', '142', '111', '69X', '27X', '116', '46E', '161', '118', '25X', '38A', '33A', '31A']\n routes_and_stops={}\n for route in routes:\n routes_and_stops[route]=[] #new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route=extract_bus_route(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops",
"def test_trips_starting_at_a_and_ending_at_c_by_4_stops(self):\n railroad = trains.Railroad()\n self.assertEqual(sorted(railroad.find_routes('A', 'C', 4, 4)), sorted(['ABCDC', 'ADCDC', 'ADEBC']))",
"def get_route_list(agency):\n\n # Get XML data containing routeList for sf-muni agency\n xml_query_string = 'http://webservices.nextbus.com/service/publicXMLFeed?command=routeList&a=' + agency\n xml_request = requests.get(xml_query_string)\n routes = {}\n root = ET.fromstring(xml_request.text)\n for child in root:\n routes[child.attrib['tag']] = child.attrib['title']\n return routes",
"def get_route_urls(area_url, area_id, lat, long):\n\n # Open page html with BeautifulSoup\n area_doc = urlopen(area_url, context=ctx)\n area_html = area_doc.read()\n # Parses html with BS package\n area_soup = BeautifulSoup(area_html, 'html.parser')\n\n # Opens main body of page\n body = area_soup.body\n # Contains list of all routes in an area\n sidebar = body.find('div', class_='mp-sidebar')\n # Opens routes section\n class_ = 'max-height max-height-md-0 max-height-xs-150'\n table = sidebar.find('div',\n class_=class_)\n table = table.find('table')\n routes = table.find_all('tr', id=None)\n # Gets route url and sends to get_route_features(route_url)\n for route in routes:\n route_url = route.find('a')['href']\n get_route_features(route_url, area_id, lat, long)",
"def lookup_routes(self, daddr):\n # TODO\n outroutes = []\n\n net_pre = daddr[0 : daddr.index('.')] + '.0.0.0'\n\n #print(self.routes)\n\n for ip in self.routes.keys():\n network = self.routes[ip][NTWK]\n net_pre_2 = network[0:network.index('.')] + '.0.0.0'\n if net_pre_2 == net_pre:\n outroutes.append(ip)\n return outroutes",
"def get_routes(solution, routing, manager):\n # Get vehicle routes and store them in a two dimensional array whose\n # i,j entry is the jth location visited by vehicle i along its route.\n routes = []\n for route_nbr in range(routing.vehicles()):\n index = routing.Start(route_nbr)\n route = [manager.IndexToNode(index)]\n while not routing.IsEnd(index):\n index = solution.Value(routing.NextVar(index))\n route.append(manager.IndexToNode(index))\n routes.append(route)\n return routes",
"def get_cities_of_route(network, route):\n result = []\n if len(route) >= 1:\n road_id = route[0]\n result.append( get_start(network,road_id) )\n result.append( get_end(network,road_id) )\n for i in range(1,len(route)):\n road_id = route[i]\n result.append( get_end(network,road_id) )\n return result",
"def _parse_routes(self, data):\n trips = []\n routes = data.findall('./itdRoute')\n for route in routes:\n trip = Trip()\n interchange = None\n for routepart in route.findall('./itdPartialRouteList/itdPartialRoute'):\n part = self._parse_routepart(routepart)\n if interchange is not None:\n if isinstance(part, RideSegment):\n interchange.destination = part[0].platform\n else:\n interchange.destination = part[0].origin\n trip._parts.append(part)\n\n interchange = self._parse_interchange(routepart)\n if isinstance(part, RideSegment):\n if interchange is not None:\n interchange.origin = part[-1].platform\n trip._parts.append(interchange)\n else:\n if interchange is not None:\n part.events = interchange.events\n interchange = None\n\n ticketlist = TicketList()\n tickets = route.find('./itdFare/itdSingleTicket')\n if tickets:\n authority = tickets.attrib['net']\n ticketlist.single = TicketData(authority, tickets.attrib['unitsAdult'], float(tickets.attrib['fareAdult']), float(tickets.attrib['fareChild']))\n ticketlist.bike = TicketData(authority, tickets.attrib['unitsBikeAdult'], float(tickets.attrib['fareBikeAdult']), float(tickets.attrib['fareBikeChild']))\n ticketlist.currency = tickets.attrib['currency']\n ticketlist.level_name = tickets.attrib['unitName']\n for ticket in tickets.findall('./itdGenericTicketList/itdGenericTicketGroup'):\n t = TicketData()\n name = ticket.find('./itdGenericTicket[ticket=\"TICKETTYPE\"]/value')\n if name is None or not name.text:\n continue\n\n authority = ticket.find('./itdGenericTicket[ticket=\"TARIFF_AUTHORITY\"]/value')\n if authority is not None and authority.text:\n t.authority = authority.text\n\n level = ticket.find('./itdGenericTicket[ticket=\"FARE_CATEGORY\"]/value')\n if level is not None and level.text:\n t.level = level.text\n\n prices = []\n adult = ticket.find('./itdGenericTicket[ticket=\"TICKET_ID_ADULT\"]/value')\n if adult is not None and adult.text:\n price = ticket.find('./itdGenericTicket[ticket=\"FARE_ADULT\"]/value')\n if price is not None and price.text:\n prices.append(float(price.text))\n\n child = ticket.find('./itdGenericTicket[ticket=\"TICKET_ID_CHILD\"]/value')\n if child is not None and child.text:\n price = ticket.find('./itdGenericTicket[ticket=\"FARE_CHILD\"]/value')\n if price is not None and price.text:\n prices.append(float(price.text))\n\n if not prices:\n continue\n\n t.price = prices[0]\n if len(prices) == 2:\n t.price_child = prices[1]\n ticketlist.other[name.text] = t\n trip.tickets = ticketlist\n\n trips.append(trip)\n\n return trips"
]
| [
"0.6657399",
"0.623016",
"0.608577",
"0.60547775",
"0.5974454",
"0.5870919",
"0.583147",
"0.58048505",
"0.58005136",
"0.5778787",
"0.5746239",
"0.573472",
"0.57338464",
"0.5691639",
"0.56693816",
"0.5650809",
"0.56485844",
"0.56442",
"0.56375074",
"0.562753",
"0.5611618",
"0.55901915",
"0.55542755",
"0.5525231",
"0.54889345",
"0.54687226",
"0.5454019",
"0.5451216",
"0.5429958",
"0.54281133"
]
| 0.815749 | 0 |
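A quick way to exercise the mbta_route_list snippet above: write a complete_routes.txt in the dict-literal shape the function parses with ast.literal_eval, then call it. The sample payload below is made up for illustration, not real MBTA API output:

```python
# Build a tiny complete_routes.txt in the structure mbta_route_list() expects.
sample = {
    "mode": [
        {"mode_name": "Subway",
         "route": [{"route_id": "Red"}, {"route_id": "Orange"}, {"route_id": "Red"}]},
        {"mode_name": "Bus",
         "route": [{"route_id": "1"}]},
    ]
}

with open("complete_routes.txt", "w") as f:
    f.write(repr(sample))

# Only Subway routes are kept; duplicates are dropped while preserving order.
print(mbta_route_list())  # -> ['Red', 'Orange']
```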
Fetch from url or from file if it has been cached previously | def fetch_maybe(cls, url, path, save=False):
if os.path.isfile(path):
# print("Found %s" % os.path.basename(path))
with open(path, "rb") as file:
return file.read(), True
if save:
return cls.fetch_and_save(url, path), False
return cls.fetch_with_retry(url), False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path",
"def FetchUrlContent(url):\n content = memcache.get(url)\n if content:\n return content\n\n request = urlfetch.fetch(url)\n\n if request.status_code == 200:\n content = request.content\n memcache.add(url, content, 60 * 60)\n return content\n\n raise LookupError('Unable to fetch URL. Response code: ' +\n str(request.status_code))",
"def fetch(self, url) -> StyleInfo:\n caching, caching_delta = parse_cache_option(self.cache_option)\n path = self._get_output_path(url)\n cached = self._get_from_cache(caching, url)\n if cached:\n return path, cached\n\n contents = self._do_fetch(url)\n if not contents:\n return None, \"\"\n\n self._save_to_cache(caching, caching_delta, url, contents)\n return path, contents",
"def load_file_from_url(self, url: str) -> bytes:\n cached_content = self.cache_get(url)\n if cached_content is not None:\n return cached_content\n try:\n req = requests.get(url, timeout=self.requests_timeout)\n req.raise_for_status()\n content = req.content\n self.cache_set(url, content)\n except requests.RequestException as err:\n self.log_error(err)\n repl_content = self.get_replacement_file(url)\n if repl_content is None:\n raise ImageNotFound(err)\n content = repl_content\n return content",
"def get_from_cache(cls, target_filename):\n is_cached = cls.is_remote_cached(target_filename)\n if is_cached:\n cache = cls.CACHE_BACKEND()\n cache.download(is_cached, target_filename)\n logger.debug('File %r was downloaded from %r', target_filename, cls.CACHE_BACKEND)\n else:\n target_filename = None\n return target_filename",
"def checkout(url, filename, cache_timeout=settings.DEFAULT_CACHE_TIMEOUT,\n force=False):\n if not filename:\n return None\n check_cache_dir()\n filepath = os.path.join(settings.CACHE_DIR, filename)\n if not is_cache_file_expired(filepath, cache_timeout=cache_timeout) \\\n and not force:\n f = codecs.open(filepath, 'r', \"utf-8\")\n res = f.read()\n f.close()\n return {\"url\": url, \"result\": res, \"from_cache\": True}\n return fetch_and_parse(url, filepath)",
"def _read_cache(url):\n\n j = None\n m = hashlib.md5()\n m.update(url)\n if os.path.exists('.cache.%s' % m.hexdigest()):\n with open('.cache.%s' % m.hexdigest(), 'rb') as infile:\n j = json.load(infile)\n\n return j",
"def _getFile(url, cachedFile=True, return_filename=False):\n assert url, \"WHY are you trying to load an empty string url?!?! Nothing good will come of this! In fact, I will assure that! %s\" % (url)\n md5 = hashlib.md5(url).hexdigest()\n filename = os.path.join(config.WEB_CACHE_DIR, md5)\n if os.path.exists(filename) and cachedFile:\n ret = open(filename, 'r').read()\n else:\n opener = urllib.FancyURLopener()\n ret = opener.open(url).read()\n o = open(filename, 'wb') # had to open in binary mode so PIL's Image.Open() function would work\n o.write(ret)\n o.close()\n if return_filename:\n return filename\n else:\n return ret",
"def fetch(self, url):\r\n fname = os.path.join(self._cachedir, self._formatter(url))\r\n if not os.path.exists(fname):\r\n time.sleep(self._sleep)\r\n html = urllib.urlopen(url).read()\r\n with codecs.open(fname, 'w', 'utf-8') as f:\r\n soup = BeautifulSoup(html)\r\n f.write(unicode(soup))\r\n return fname",
"def _load_for_cache(self, parsed_uri, session):\n remote_uri = \"{}://{}/{}\".format(parsed_uri.scheme, parsed_uri.netloc, parsed_uri.path)\n if self.verbose:\n print(\"Loading URI {}\".format(remote_uri), file=sys.stderr)\n response = session.get(remote_uri)\n try:\n response.raise_for_status()\n except HTTPError as e:\n raise RefResolutionException(\n \"Could not load file {}\".format(parsed_uri.geturl())\n ) from e\n remote_json = self._load_json(response)\n return remote_json",
"def download(self, name: str):\n result = self.l2.load(name)\n if result is not None:\n logging.debug(f'{name} l2 hit')\n return result\n\n result = self.l3.download(name, self.l2.get_path(name))\n if result is not None:\n logging.debug(f'{name} l3 hit')\n return self.l2.load(name)\n logging.debug(f'{name} cache miss')\n return None # Cache Miss",
"def safeFetch(self, url, postdata={}, timeout=60):\n resp, status = self.fetch(url, postdata, timeout) \n if resp == None:\n return None\n reUrl = resp.geturl().strip()\n if not self.theSameUrl(reUrl, url):\n return None\n return resp",
"def get(self, url, recache=False):\n\n cachedir = self._cachedir(url)\n cachefilename = self._cachefilename(cachedir)\n\n # If \"filename\" file exists, it's a hit; read the actual filename\n # from there and return the cached content file\n if cachefilename.exists() and not recache:\n logger.debug(f\"Cache hit for {url}\")\n with open(cachefilename) as f:\n filename = f.readline()\n return cachedir / filename\n\n # Cache miss; attempt to download the URL\n with requests.get(url, allow_redirects=True, stream=True,\n timeout=30.0) as r:\n r.raise_for_status()\n\n # Determine download filename\n filename = None\n cd = r.headers.get('content-disposition')\n if cd:\n filenames = re.findall('filename=([^;]+)', cd)\n if len(filenames) > 0:\n filename = filenames[0]\n if filename is None:\n filename = os.path.basename(urllib.parse.urlparse(url).path)\n logger.info(f\"Caching {url} ({filename})\")\n\n cachefile = cachedir / filename\n try:\n # Download file\n with open(cachefile, 'wb') as fd:\n for chunk in r.iter_content(chunk_size=1024):\n fd.write(chunk)\n\n self._writefilename(cachedir, filename)\n\n except:\n if cachefile.exists():\n cachefile.unlink()\n if cachefilename.exists():\n cachefilename.unlink()\n raise\n\n logger.debug(\"Downloaded file\")\n return cachefile",
"def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n\n import requests\n from hashlib import md5\n from pathlib import Path\n\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok=True)\n file_path = data_dir/Path(file)\n # If the file already exists and we want to force a download then\n # delete the file first so that the creation date is correct.\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n resp = requests.get(data_url, stream=True)\n file_size = int(resp.headers.get('content-length', 0))\n step = 40\n chunk_size = file_size//step\n with file_path.open('wb') as f:\n for chunk in resp.iter_content(chunk_size): # write file in chunks\n f.write(chunk)\n step -= 1\n print('[' + '#'*(41 - step) + (step)*' ' + ']\\r', end='')\n print(f\"\\nDownloaded {data_url.split('/')[-1]}!\")\n else:\n import time\n time_downloaded = time.ctime(file_path.stat().st_ctime)\n print(\"Using version already downloaded:\", time_downloaded)\n # Compute and print md5 hash of file, whether newly downloaded or not\n m5 = md5()\n m5.update(file_path.read_bytes())\n print(f\"MD5 hash of file: {m5.hexdigest()}\")\n return file_path",
"def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in ('http', 'https', 's3'):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == '':\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))",
"def retrieve(self, url, filename, reporthook=None, data=None, cont=None):\n url = urllib.unwrap(urllib.toBytes(url))\n if self.tempcache and url in self.tempcache:\n return self.tempcache[url]\n type, url1 = urllib.splittype(url)\n if filename is None and (not type or type == 'file'):\n try:\n fp = self.open_local_file(url1)\n hdrs = fp.info()\n del fp\n return urllib.url2pathname(urllib.splithost(url1)[1]), hdrs\n except IOError, msg:\n pass\n bs = 1024*8\n size = -1\n read = 0\n blocknum = 0\n if cont:\n localsize = self.continue_file(filename)\n read = localsize\n blocknum = localsize / bs\n fp = self.open(url, data)\n headers = fp.info()\n if cont:\n if (self.fetcher.proto == self.fetcher.PROTO_HTTP and\n not (headers.dict.get(\"content-range\") or\n headers.dict.get(\"Content-Range\"))):\n raise ResumeNotSupported\n tfp = open(filename, 'rb+')\n tfp.seek(-self.checksum_size, os.SEEK_END)\n local = tfp.read(self.checksum_size)\n remote = fp.read(self.checksum_size)\n if not local == remote:\n raise ResumeChecksumFailed\n else:\n tfp = open(filename, 'wb')\n result = filename, headers\n if self.tempcache is not None:\n self.tempcache[url] = result\n if reporthook:\n if \"content-length\" in headers:\n size = int(headers[\"Content-Length\"])\n if cont and self.fetcher.proto == self.fetcher.PROTO_HTTP:\n size = size + localsize - self.checksum_size\n reporthook(blocknum, bs, size)\n while 1:\n block = fp.read(bs)\n if block == \"\":\n break\n read += len(block)\n tfp.write(block)\n blocknum += 1\n if reporthook:\n reporthook(blocknum, bs, size)\n fp.close()\n tfp.close()\n del fp\n del tfp\n\n # raise exception if actual size does not match content-length header\n if size >= 0 and read < size:\n raise urllib.ContentTooShortError(\"retrieval incomplete: got only %i out \"\n \"of %i bytes\" % (read, size), result)\n\n return result",
"def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None):\n\tif cache_dir is None:\n\t\tcache_dir = PYTORCH_TRANSFORMERS_CACHE\n\tif sys.version_info[0] == 3 and isinstance (url_or_filename, Path):\n\t\turl_or_filename = str (url_or_filename)\n\tif sys.version_info[0] == 3 and isinstance (cache_dir, Path):\n\t\tcache_dir = str (cache_dir)\n\n\tparsed = urlparse (url_or_filename)\n\n\tif parsed.scheme in ('http', 'https', 's3'):\n\t\t# URL, so get it from the cache (downloading if necessary)\n\t\treturn get_from_cache (url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies)\n\telif os.path.exists (url_or_filename):\n\t\t# File, and it exists.\n\t\treturn url_or_filename\n\telif parsed.scheme == '':\n\t\t# File, but it doesn't exist.\n\t\traise EnvironmentError (\"file {} not found\".format (url_or_filename))\n\telse:\n\t\t# Something unknown\n\t\traise ValueError (\"unable to parse {} as a URL or as a local path\".format (url_or_filename))",
"def get_from_cache(url, cache_dir=None, force_download=False, proxies=None):\n\tif cache_dir is None:\n\t\tcache_dir = PYTORCH_TRANSFORMERS_CACHE\n\tif sys.version_info[0] == 3 and isinstance (cache_dir, Path):\n\t\tcache_dir = str (cache_dir)\n\tif sys.version_info[0] == 2 and not isinstance (cache_dir, str):\n\t\tcache_dir = str (cache_dir)\n\n\tif not os.path.exists (cache_dir):\n\t\tos.makedirs (cache_dir)\n\n\t# Get eTag to add to filename, if it exists.\n\tif url.startswith (\"s3://\"):\n\t\tetag = s3_etag (url, proxies=proxies)\n\telse:\n\t\ttry:\n\t\t\tresponse = requests.head (url, allow_redirects=True, proxies=proxies)\n\t\t\tif response.status_code != 200:\n\t\t\t\tetag = None\n\t\t\telse:\n\t\t\t\tetag = response.headers.get (\"ETag\")\n\t\texcept EnvironmentError:\n\t\t\tetag = None\n\n\tif sys.version_info[0] == 2 and etag is not None:\n\t\tetag = etag.decode ('utf-8')\n\tfilename = url_to_filename (url, etag)\n\n\t# get cache path to put the file\n\tcache_path = os.path.join (cache_dir, filename)\n\n\t# If we don't have a connection (etag is None) and can't identify the file\n\t# try to get the last downloaded one\n\tif not os.path.exists (cache_path) and etag is None:\n\t\tmatching_files = fnmatch.filter (os.listdir (cache_dir), filename + '.*')\n\t\tmatching_files = list (filter (lambda s: not s.endswith ('.json'), matching_files))\n\t\tif matching_files:\n\t\t\tcache_path = os.path.join (cache_dir, matching_files[-1])\n\n\tif not os.path.exists (cache_path) or force_download:\n\t\t# Download to temporary file, then copy to cache dir once finished.\n\t\t# Otherwise you get corrupt cache entries if the download gets interrupted.\n\t\twith tempfile.NamedTemporaryFile () as temp_file:\n\t\t\tlogger.info (\"%s not found in cache or force_download set to True, downloading to %s\", url, temp_file.name)\n\n\t\t\t# GET file object\n\t\t\tif url.startswith (\"s3://\"):\n\t\t\t\ts3_get (url, temp_file, proxies=proxies)\n\t\t\telse:\n\t\t\t\thttp_get (url, temp_file, proxies=proxies)\n\n\t\t\t# we are copying the file before closing it, so flush to avoid truncation\n\t\t\ttemp_file.flush ()\n\t\t\t# shutil.copyfileobj() starts at the current position, so go to the start\n\t\t\ttemp_file.seek (0)\n\n\t\t\tlogger.info (\"copying %s to cache at %s\", temp_file.name, cache_path)\n\t\t\twith open (cache_path, 'wb') as cache_file:\n\t\t\t\tshutil.copyfileobj (temp_file, cache_file)\n\n\t\t\tlogger.info (\"creating metadata file for %s\", cache_path)\n\t\t\tmeta = {'url': url, 'etag': etag}\n\t\t\tmeta_path = cache_path + '.json'\n\t\t\twith open (meta_path, 'w') as meta_file:\n\t\t\t\toutput_string = json.dumps (meta)\n\t\t\t\tif sys.version_info[0] == 2 and isinstance (output_string, str):\n\t\t\t\t\toutput_string = unicode (output_string, 'utf-8') # The beauty of python 2\n\t\t\t\tmeta_file.write (output_string)\n\n\t\t\tlogger.info (\"removing temp file %s\", temp_file.name)\n\n\treturn cache_path",
"def _make_http_request_read(self, path):\n url = self.url_base + path\n if url not in self._requests_cache:\n self._requests_cache[url] = self._perform_http_request(url)[2]\n return self._requests_cache[url]",
"def cached_json_get(url):\n return requests.get(url).json()",
"def get_from_cache(self, url):\n cache_key, cache_lookup = self.get_cacheable_info(url)\n\n cache_timeout = self.cache_timeouts.get(cache_key,\n self.default_cache_timeout)\n\n data, access_time = MEM_CACHE[cache_key].get(cache_lookup, (None, 0))\n if data and time.time() - access_time < cache_timeout:\n return data\n return False",
"def request_data(url): \n requests_cache.install_cache('data_cache')\n while True:\n data = requests.get(url)\n if not data.status_code == 200 or \"try again later\" in data.text:\n continue\n else:\n break\n return data.text",
"def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)",
"def fetch(cls, url):\n delta = time.time() - cls._time_last_fetched\n wait_time = TIME_TIL_RETRY - delta\n if wait_time > 0:\n time.sleep(wait_time)\n resp = requests.get(url)\n cls._time_last_fetched = time.time()\n resp.raise_for_status()\n return resp",
"def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in (\"http\", \"https\", \"s3\"):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))",
"def _load_for_cache(self, doc_uri, doc, parsed_uri):\n remote_uri = '{}://{}/{}'.format(\n parsed_uri.scheme, parsed_uri.netloc, parsed_uri.path)\n if self.verbose:\n print('Loading URI {}'.format(remote_uri), file=sys.stderr)\n response = self.session.get(remote_uri)\n try:\n response.raise_for_status()\n except HTTPError as e:\n raise RefResolutionException(\n 'Could not load file {}'.format(parsed_uri.geturl()))\n remote_json = self._load_json(response)\n return remote_json",
"def fetch(url):\n content = requests.get(url).text\n if \"Error\" in content:\n raise ValueError(f\"Cannot read from: {url}\")\n return content",
"def load_url_data(url, fname=None, cache_time=0, nretry=3, sleeptime=60):\n\n content = None\n if url and is_file_expired(fname, cache_time): # load data into temporary cache file\n for trial in range(nretry):\n if content:\n break\n try:\n if os.path.isfile(url):\n logger.info('[attempt=%s] loading data from file=%s' % (trial, url))\n with open(url, \"r\") as f:\n content = f.read()\n else:\n logger.info('[attempt=%s] loading data from url=%s' % (trial, url))\n content = urllib2.urlopen(url, timeout=20).read()\n\n if fname: # save to cache\n with open(fname, \"w+\") as f:\n f.write(content)\n logger.info('saved data from \"%s\" resource into file=%s, length=%.1fKb' %\n (url, fname, len(content) / 1024.))\n return content\n except Exception as e: # ignore errors, try to use old cache if any\n logger.warning('failed to load data from url=%s, error: %s .. trying to use data from cache=%s' %\n (url, e, fname))\n # will try to use old cache below\n if trial < nretry - 1:\n logger.info(\" -- DEPRECATED-- will try again after %ss..\" % sleeptime)\n from time import sleep\n sleep(sleeptime)\n\n if content is not None: # just loaded\n return content\n\n try:\n with open(fname, 'r') as f:\n content = f.read()\n except Exception as e:\n logger.warning(\"%s (will try different source)\" % e)\n return None\n\n return content",
"def fetch(self, url, listener, useCache = True): #$NON-NLS-1$\r",
"def get(self):\n # If we have a cache_key, see if there is data under that key\n # in our url cache and use that if there is.\n #\n if self.cache_key and self.cache_key in self.cache:\n return self.cache[self.cache_key]\n\n # If the actual URL is the empty string, and we did not have a cached\n # result for it, then we can not retrieve anything. Return None.\n #\n if self.url is None or len(self.url) == 0:\n return None\n\n if not self.use_post:\n # If we are NOT using 'POST' to query the URL we can create a\n # simple urllib2.Request object.\n #\n req = urllib2.Request(self.url)\n else:\n # If we ARE using 'POST' then we need to interpret the\n # parameters out of the URL and pass them as the 'data'\n # parameter to the request object we are creating. This will\n # cause 'urlopen()' to use POST to get the results.\n #\n o = urlparse.urlsplit(self.url)\n req = urllib2.Request(o.scheme + \"://\" + o.netloc + o.path, o.query)\n\n # If 'spoof_url' is NOT None, then we\n # want our request to use the 'spoof_url' as its referrer\n #\n if self.spoof_url is not None:\n req.add_header('Referer', self.spoof_url)\n\n # What we get from the remote site is UTF-8 so decode it in to unicode\n # and then encode that as ASCII with characters that can not be\n # represented in ASCII replaced with their XML character references.\n #\n f = urllib2.urlopen(req)\n content_type = f.info()['Content-Type'].lower()\n\n # Based on the content type we need to deal with the response\n # in various ways, like unzip, or re-encoding as ascii.\n #\n if content_type == \"application/zip\":\n # In zip files we extract all the individual files.\n #\n # NOTE: Since the zipfile.ZipFile class needs a file like object\n # with the 'seek()' method we use a StringIO to hold\n # our url result data.\n #\n result = []\n stringy = StringIO(f.read())\n z = zipfile.ZipFile(stringy, 'r')\n members = z.namelist()\n for member in members:\n result.append(z.read(member))\n z.close()\n stringy.close()\n\n # The way the scraper wants to work is that it gets all parts\n # of such a zip file as a single string.. so join them all\n # together (separated by a newline character, just because.)\n #\n result = \"\\n\".join(result)\n elif content_type[0:9] == \"text/xml;\":\n ign,charset = content_type.split('=')\n\n # XXX We should just return what we get and not encode it as\n # ascii. The end point should encode if it only wants to\n # see a string... (or maybe we SHOULD do this..)\n #\n result = f.read().decode(charset).encode(\"ascii\",\n \"xmlcharrefreplace\")\n else:\n # Finally we do not know what to do with it.. just read it\n # in to a string.\n #\n result = f.read()\n\n f.close()\n if self.cache_key:\n self.cache[self.cache_key] = result\n return result"
]
| [
"0.71518296",
"0.7033755",
"0.70170456",
"0.70076776",
"0.6999763",
"0.6917222",
"0.68979603",
"0.6897935",
"0.6821177",
"0.6820702",
"0.6801673",
"0.67860883",
"0.6752806",
"0.6752673",
"0.6666926",
"0.66282046",
"0.6614238",
"0.6608379",
"0.66022563",
"0.6602047",
"0.65935",
"0.6589468",
"0.65720814",
"0.6565355",
"0.65472406",
"0.6539647",
"0.65280515",
"0.6527513",
"0.65179145",
"0.64836055"
]
| 0.76373774 | 0 |
Fetch file and save to disk | def fetch_and_save(cls, url, path):
content = cls.fetch_with_retry(url)
if not content:
return False
# print("Saving {}".format(os.path.basename(path)))
with open(path, "wb") as file:
file.write(content)
return content | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")",
"def save(self, filename: str):\n r = requests.get(self.raw_url, stream=True)\n if r.status_code == 404:\n raise Exception(f\"Document {self.key} does not exist\")\n r.raise_for_status()\n \n with open(filename, 'wb') as file:\n for chunk in r.iter_content(10 * 1024):\n file.write(chunk)",
"def fetch_file(self, location, output=None):\n\n self.log.debug(\"Fetching '%s' file...\" % location)\n\n if not output:\n output = tempfile.mktemp(\"-dogen\")\n \n self.log.debug(\"File will be saved as '%s'...\" % output)\n\n with open(output, 'wb') as f:\n f.write(requests.get(location, verify=self.ssl_verify).content)\n\n return output",
"def __download_file(self, filename):\r\n \r\n respons = requests.get(self.__url + filename, stream=True)\r\n save_filename = os.path.join(self.__folder, os.path.basename(filename))\r\n with open(save_filename, 'wb') as output_file:\r\n for chunk in respons.iter_content(chunk_size=128):\r\n output_file.write(chunk)",
"def download_and_save(url, file_name,file_extension):\n #make a request for the file\n response = requests.get(url, allow_redirects =True)\n\n #compose the file + extension\n file_to_be_saved = f\"{file_name}.{file_extension}\"\n \n #Create a new file with \"file_to_be_saved\" in the current directory\n # And save this file and print the directory with the OS module\n with open(file_to_be_saved, 'wb') as file:\n print(\"saving file.... \\n\")\n file.write(response.content)\n print('done....\\n')\n print('file saved as: ', file_to_be_saved )\n print('in: ', os.getcwd() )",
"def download_file(self, filename: str, save_dir: str) -> None:\n raise NotImplementedError()",
"def fetch_repo_file(self, path, save = False, mode = 'w'):\n\t\ttry:\n\t\t\tprint(\"Fetching repo file: {0}\".format(self.config[\"repo\"][\"repo_proto\"] + \"://\" + self.config[\"repo\"][\"repo_addr\"] + \":\" + self.config[\"repo\"][\"repo_port\"] + path))\n\t\t\n\t\t\tdata = urllib.request.urlopen(self.config[\"repo\"][\"repo_proto\"] + \"://\" + self.config[\"repo\"][\"repo_addr\"] + \":\" + self.config[\"repo\"][\"repo_port\"] + path).read()\n\n\t\t\tif save != False:\n\t\t\t\tf = open(path, mode)\n\t\t\t\tf.write(data)\n\t\t\t\tf.close()\n\t\t\treturn data\n\t\texcept Exception as e:\n\t\t\tprint(\"Failed to connect to server, exiting...\");\n\t\t\tsys.exit(1)",
"def download_file(self, url, path):\n print('\\tDownloading: ', path)\n with open(path, 'w') as outfile:\n try:\n response = self._http_client.get(url)\n outfile.write(response.text)\n finally:\n response.close()\n outfile.close()\n gc.collect()",
"def read_and_save(res):\n fname = os.path.split(urlsplit(res.url).path)[-1]\n fpath = os.path.join(cfg.OUTPUT_DIR, fname)\n with open(fpath, 'wb') as f:\n for chunk in res.iter_content(cfg.CHUNK):\n f.write(chunk)",
"def to_file(self, filename):\n resp = urlopen(self.url)\n self.file_size = self._get_content_length(resp.headers)\n block_size = 8192\n self.bytes_read = 0\n with open(filename, 'wb') as f:\n while True:\n buf = resp.read(block_size)\n if not buf:\n break\n self.bytes_read += len(buf)\n f.write(buf)\n self._dl_progress_bar()\n if self.show_progress:\n print(' ✓')",
"def fetch_file(self, path, content_type, response=settings.HTTP_OK):\n try:\n with open(path) as fp:\n self.fetch_content(fp.read(), content_type, response)\n except IOError:\n self.send_error(settings.HTTP_INTERNAL_SERVER_ERROR)",
"def fetch(file_url):\n\n tmp_file_handle = NamedTemporaryFile(delete=True)\n headers = {'User-Agent': 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}\n\n # download file and save to temp object\n with requests.get(file_url, headers=headers, stream=True) as r:\n tmp_file_handle.write(r.content)\n\n tmp_file_handle.flush()\n\n return tmp_file_handle",
"def download(self):\n\n # os.open *should* give a thread-safe way to exlusivly open files\n filepath = self.film\n try:\n # os.O_BINARY is only avilable and needed on windows\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY | os.O_BINARY\n except:\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY\n try:\n fd = os.open(filepath, flags)\n except:\n return\n\n try:\n response = self.session.get(self.filmurl, stream=True)\n if response.status_code == 200:\n for chunk in response.iter_content(1024):\n os.write(fd, chunk)\n except:\n # Remove partial img file if request or stream fails\n os.close(fd)\n os.remove(filepath)",
"def fetch(file):\n\tprint \"Fetching {0}...\".format(file.split(\"/\")[-1])\n\tsubprocess.call(\"wget {0} > /dev/null 2>&1\".format(file), shell=True)",
"def save_file(s, filepath, download_url, cookie):\n\n with open(filepath, \"wb\") as f:\n response = s.get(download_url, cookies=cookie, stream=True)\n total = response.headers.get(\"content-length\")\n if total is None:\n f.write(response.content)\n else:\n downloaded = 0\n total = int(total)\n for data in response.iter_content(chunk_size=max(int(total / 1000),\n 1024 * 1024)\n ):\n downloaded += len(data)\n f.write(data)\n done = int(50 * downloaded / total)\n sys.stdout.write(\n \"\\r[{}{}]\".format(\"█\" * done, \".\" * (50 - done))\n )\n sys.stdout.flush()\n sys.stdout.write(\"\\n\")",
"def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")",
"def _download_file(self, path, info=None):\n self._log.debug(\"Downloading file {!r}\".format(path))\n\n if info is None:\n info = self._git_show(path)\n\n # info *SHOULD* be a basestring\n if not isinstance(info, basestring):\n raise Exception(\"{!r} was not a file! (info was {!r})\".format(\n path,\n info\n ))\n\n dest_path = os.path.join(self._code_dir, path.replace(\"/\", os.path.sep))\n self._save_file(dest_path, info)",
"def fetch(self) -> None:\n workflow_spec_path = os.path.join(self._output_dir, self._spec)\n self._download_file(self._parsed_url.original_url, workflow_spec_path)",
"def GetAndSave(self, url, save_suffix, unzip=False):\n self.connection.request('GET',\n '/data/' + url,\n headers={'content-type': 'text/plain'})\n response = self.connection.getresponse()\n file_name = Clean(url) + save_suffix\n destination = os.path.join(self.path, file_name)\n\n if response.status != 200:\n raise IOError(url)\n\n if unzip:\n s = StringIO.StringIO(response.read())\n content = gzip.GzipFile(fileobj=s).read()\n else:\n content = response.read()\n\n with open(destination, 'w') as f:\n f.write(content)\n return content",
"def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path",
"def cache_and_save(self):\n if self.url and not self.photo:\n result = urllib.urlretrieve(self.url)\n self.photo.save(\n os.path.basename(self.url),\n File(open(result[0], 'rb')),\n )\n self.save()",
"def save_data(self):\n # Command to get the download data\n pass",
"def httpretrieve_save_file(url, filename, querydata=None, postdata=None, \\\r\n httpheaders=None, proxy=None, timeout=None):\r\n\r\n # Open the output file object and http file-like object.\r\n outfileobj = open(filename, 'w')\r\n httpobj = httpretrieve_open(url, querydata=querydata, postdata=postdata, \\\r\n httpheaders=httpheaders, proxy=proxy, timeout=timeout)\r\n\r\n # Repeatedly read from the file-like HTTP object into our file, until the\r\n # response is finished.\r\n responsechunkstr = None\r\n while responsechunkstr != '':\r\n responsechunkstr = httpobj.read(4096)\r\n outfileobj.write(responsechunkstr)\r\n\r\n outfileobj.close()\r\n httpobj.close()",
"def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH",
"def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH",
"def save_file(self, name, file):\n fs = FileSystemStorage()\n filename = fs.save(name, file)\n return fs.path(filename), fs.url(filename)",
"def save_file(self, response):\r\n # Extract filename from response url\r\n filename = re.search('[^/]+(?=/$|$)', response.url).group(0)\r\n\r\n # Prepend download folder name to the filename\r\n filename = self.config[\"folder\"] + filename\r\n os.makedirs(os.path.dirname(filename), exist_ok=True)\r\n\r\n # Write contents to file\r\n with open(filename, 'wb') as f:\r\n f.write(response.content)\r\n\r\n # Print message displaying the absolute filepath for convenience\r\n print(\"Downloaded file to \" + os.path.abspath(filename))",
"def _get(self, remote_filename, local_path):\n\n with local_path.open('wb') as local_file:\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be downloaded: it does not exist' %\n remote_filename)\n\n response = self.http_client.get(\n self.content_url + '/nodes/' + file_id + '/content', stream=True)\n response.raise_for_status()\n for chunk in response.iter_content(chunk_size=DEFAULT_BUFFER_SIZE):\n if chunk:\n local_file.write(chunk)\n local_file.flush()",
"def download_file(self, url, filename):\n r = requests.get(url, stream=True)\n r.raise_for_status()\n\n with open(filename, 'wb') as f:\n for chunk in r.iter_content():\n if chunk:\n f.write(chunk)\n f.flush()",
"def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )"
]
| [
"0.74186593",
"0.6852157",
"0.67538196",
"0.6674495",
"0.66473705",
"0.66278994",
"0.65727353",
"0.6508837",
"0.6444663",
"0.64405197",
"0.6414596",
"0.6410541",
"0.63965726",
"0.6357957",
"0.63492334",
"0.6310455",
"0.6295792",
"0.6293493",
"0.6283312",
"0.6273704",
"0.6268726",
"0.62616277",
"0.6260099",
"0.6256048",
"0.6256048",
"0.62441427",
"0.62426156",
"0.61938316",
"0.61935085",
"0.61860394"
]
| 0.7322474 | 1 |
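For context on the `fetch_and_save` positive in the record above: it delegates retrieval to a `fetch_with_retry` classmethod that the record does not show. Below is a minimal, self-contained sketch of how such a pair of methods could fit together, using the `requests` library; the `Downloader` class name and the retry behaviour are assumptions for illustration, not part of the record.

```python
import requests


class Downloader:
    """Illustrative owner of fetch_and_save; not taken from the dataset record."""

    @classmethod
    def fetch_with_retry(cls, url, retries=3):
        # Try the URL a few times and return the raw bytes, or None on failure.
        for _ in range(retries):
            try:
                resp = requests.get(url, timeout=20)
                resp.raise_for_status()
                return resp.content
            except requests.RequestException:
                continue
        return None

    @classmethod
    def fetch_and_save(cls, url, path):
        # Mirrors the positive document: bail out on empty content,
        # otherwise write the bytes to disk and return them.
        content = cls.fetch_with_retry(url)
        if not content:
            return False
        with open(path, "wb") as file:
            file.write(content)
        return content


# Usage sketch: Downloader.fetch_and_save("https://example.com/data.bin", "/tmp/data.bin")
```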
Implementation of sim(n1, n2) from Collins and Duffy (2001). Parsing with a Single Neuron. Given two (dependency parse) graphs and a node from each one, returns the set of common dependency targets. Each target consists of a (target node ID, target node ID) tuple, where the first target is from the first graph and the second one is from the second graph. What is a dependency target? We check all outgoing edges of node n1 from graph1 and n2 from graph2 for common dependency relations (e.g. 'dt' or 'subj'). If both nodes share a relation, we check if the neighboring nodes we reach with this relation represent the same token. If they do, a (n1 target node, n2 target node) dependency target is added to the result set. | def common_dependency_targets(graph1, graph2, n1, n2, node_attrib='label',
edge_attrib='label'):
n1_children = dependency_children(graph1, n1, edge_attrib=edge_attrib)
n2_children = dependency_children(graph2, n2, edge_attrib=edge_attrib)
n1_rels, n2_rels = defaultdict(list), defaultdict(list)
for source_set, target_dict in ((n1_children, n1_rels), (n2_children, n2_rels)):
for rel, target in source_set:
target_dict[rel].append(target)
common_rels = set(n1_rels) & set(n2_rels) # intersection
common_deps = set()
for rel in common_rels:
for n1_target in n1_rels[rel]:
n1_target_word = graph1.node[n1_target][node_attrib]
for n2_target in n2_rels[rel]:
n2_target_word = graph2.node[n2_target][node_attrib]
if n1_target_word == n2_target_word:
common_deps.add( (n1_target, n2_target) )
return common_deps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def node_diff(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n if self.node_dict1 is None or self.node_dict2 is None:\n self.make_node_dict()\n # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2\n g1={}\n g2={}\n # Set to keep track of the union of all curie ids\n curie_set = set()\n for curie in self.node_dict1.keys():\n g1[curie] = {}\n # intersection is only in the g1 dictionary\n g1[curie]['intersection'] = set()\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g1[curie]['node'] = set()\n curie_set.add(curie)\n for curie in self.node_dict2.keys():\n g2[curie] = {}\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g2[curie]['node'] = set()\n curie_set.add(curie)\n node_names1 = []\n node_names2 = []\n\n # extract all node ids (i.e. \"n0\",\"n1\",ect...)\n if len(self.input1['question_graph']['nodes'])>0:\n if 'id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']]\n elif 'node_id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']]\n if len(self.input2['question_graph']['nodes'])>0:\n if 'id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']]\n elif 'node_id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']]\n \n # initialize the result dictonary\n diff_dict = {}\n diff_dict[\"-1|-1\"] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # initialize node id tuple keys\n for id1 in node_names1:\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # iterate through answers\n for answer1 in self.input1['answers']:\n for answer2 in self.input2['answers']:\n for id1 in answer1['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer1['node_bindings'][id1], str):\n bindings1 = [answer1['node_bindings'][id1]]\n elif isinstance(answer1['node_bindings'][id1], list):\n bindings1 = answer1['node_bindings'][id1]\n for curie1 in bindings1:\n # store node id\n g1[curie1]['node'].add(id1)\n for id2 in answer2['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer2['node_bindings'][id2], str):\n bindings2 = [answer2['node_bindings'][id2]]\n elif isinstance(answer2['node_bindings'][id2], list):\n bindings2 = answer2['node_bindings'][id2]\n for curie2 in bindings2:\n # store node id\n g2[curie2]['node'].add(id2)\n if curie1 == curie2:\n # stor intersection tuple\n g1[curie1]['intersection'].add(id1+\"|\"+id2)\n # iterate through all curies\n for curie in curie_set:\n # check if curie is from answer 1\n if curie in g1.keys():\n # check if in intersection\n if len(g1[curie]['intersection'])>0:\n diff_dict[\"-1|-1\"]['intersection'] += [self.node_dict1[curie]]\n for id1 in node_names1:\n for id2 in node_names2:\n node_tuple = id1+\"|\"+id2\n if id1 in g1[curie]['node'] and id2 in g2[curie]['node']:\n diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]]\n elif id1 in g1[curie]['node']:\n diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]]\n elif id2 in g2[curie]['node']:\n diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]]\n # If not in intersection store in 
g1-g2\n else:\n diff_dict[\"-1|-1\"]['g1-g2'] += [self.node_dict1[curie]]\n for id1 in g1[curie]['node']:\n # iterate through all answer 2 ids\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2]['g1-g2'] += [self.node_dict1[curie]]\n # if not in g1 but in g2 then in g2-g1\n elif curie in g2.keys():\n diff_dict[\"-1|-1\"]['g2-g1'] += [self.node_dict2[curie]]\n for id2 in g2[curie]['node']:\n # iterate through all answer 1 ids\n for id1 in node_names1:\n diff_dict[id1+\"|\"+id2]['g2-g1'] += [self.node_dict2[curie]]\n return diff_dict",
"def get_common_targets(\n self,\n sources: List[Tuple[str, str]],\n relation: str,\n ) -> List[Node]:\n rel_str = \":%s\" % relation if relation else \"\"\n parts = [\n \"({id: '%s'})-[%s]->(t)\" % (norm_id(*source), rel_str) for source in sources\n ]\n query = \"\"\"\n MATCH %s\n RETURN DISTINCT t\n \"\"\" % \",\".join(\n parts\n )\n nodes = [self.neo4j_to_node(res[0]) for res in self.query_tx(query)]\n return nodes",
"def get_common_sources(\n self, targets: List[Tuple[str, str]], relation: str\n ) -> List[Node]:\n rel_str = \":%s\" % relation if relation else \"\"\n parts = [\n \"(s)-[%s]->({id: '%s'})\" % (rel_str, norm_id(*target)) for target in targets\n ]\n query = \"\"\"\n MATCH %s\n RETURN DISTINCT s\n \"\"\" % \",\".join(\n parts\n )\n nodes = [self.neo4j_to_node(res[0]) for res in self.query_tx(query)]\n return nodes",
"def compare_nodes(n1, n2):\n if not isinstance(n1, dict):\n raise TypeError(\"Invalid n1! Expected dict, got %s instead\" %\n type(n1).__name__)\n if not isinstance(n2, dict):\n raise TypeError(\"Invalid n2! Expected dict, got %s instead\" %\n type(n2).__name__)\n\n if not Pharmacophore.check_node(n1):\n raise ValueError(\"Invalid n1!\")\n\n if not Pharmacophore.check_node(n2):\n raise ValueError(\"Invalid n2!\")\n\n c = n1[\"freq\"] + n2[\"freq\"]\n d1 = sum(n1[\"type\"].values())\n d2 = sum(n2[\"type\"].values())\n d = d1 + d2\n sim = 0.0\n t = {}\n\n for phar in PHARS:\n if phar in n1[\"type\"] and phar in n2[\"type\"]:\n sim += (n1[\"type\"][phar] + n2[\"type\"][phar]) / d\n t[phar] = n1[\"type\"][phar] + n2[\"type\"][phar]\n elif phar in n1[\"type\"]:\n t[phar] = n1[\"type\"][phar]\n elif phar in n2[\"type\"]:\n t[phar] = n2[\"type\"][phar]\n return sim * c, t",
"def count_common_subgraphs(graph1, graph2, n1, n2,\n node_attrib='label', edge_attrib='label'):\n for graph in (graph1, graph2):\n assert nx.is_directed_acyclic_graph(graph)\n \n if graph1.node[n1][node_attrib] != graph2.node[n2][node_attrib]:\n return 0\n\n n1_children = dependency_children(graph1, n1, edge_attrib=edge_attrib)\n n2_children = dependency_children(graph2, n2, edge_attrib=edge_attrib)\n\n if not n1_children or not n2_children:\n return 0\n else:\n result = 1 # neutral element of multiplication\n for n1_target, n2_target in common_dependency_targets(graph1, graph2, n1, n2,\n node_attrib=node_attrib):\n result *= (count_common_subgraphs(graph1, graph2,\n n1_target, n2_target,\n node_attrib='label',\n edge_attrib='label') + 2)\n return result - 1",
"def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: 
{pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")",
"def dependency_similarity(s1, s2):\n # pass\n parsed_sentence_1 = parser.raw_parse(s1)\n parsed_sentence_2 = parser.raw_parse(s2)\n \n tree1 = next(parsed_sentence_1)\n tree2 = next(parsed_sentence_2)\n \n triples1 = [t for t in tree1.triples()]\n triples2 = [t for t in tree2.triples()] \n\n # Compute similarity\n if len(triples1) != 0 and len(triples2) != 0:\n similarity = 1 - jaccard_distance(set(triples1), set(triples2))\n return similarity\n else:\n return 0",
"def merge_networks_in_series(n1, n2):\n new_l_size = n1.l_size + n2.l_size + 1 # One additional vertex in between.\n new_u_size = n1.u_size + n2.u_size\n\n # Connect the 0-pole and the inf-pole in the result network.\n new_link_edge = n1.zero_pole.insert_before()\n new_link_edge_opp = n2.inf_pole.insert_after()\n new_link_edge.opposite = new_link_edge_opp\n new_link_edge_opp.opposite = new_link_edge\n\n # Merge the 0-pole of n1 with the inf-pole of n2.\n n1.inf_pole.insert_all_after(n2.zero_pole)\n\n # Remove the link edges in n1 and n2 if they are not real.\n if not n1.is_linked:\n n1.zero_pole.remove()\n n1.inf_pole.remove()\n if not n2.is_linked:\n n2.zero_pole.remove()\n n2.inf_pole.remove()\n\n # After a serial merge the poles are never linked.\n res = Network(new_link_edge, is_linked=False, l_size=new_l_size, u_size=new_u_size)\n res.type = 'S'\n return res\n\n # # Extract the poles from both networks.\n # first_net_zero_pole_edge = n1.zero_pole\n # first_net_inf_pole_edge = n1.inf_pole\n #\n # second_net_zero_pole_edge = n2.zero_pole\n # second_net_inf_pole_edge = n2.inf_pole\n #\n # # Create a new half edges for connecting the poles of the network. The\n # # edge will not be part from the edges list.\n # new_root_half_edge = first_net_zero_pole_edge.insert_after()\n # new_root_opposite = second_net_inf_pole_edge.insert_after()\n #\n # new_root_half_edge.opposite = new_root_opposite\n # new_root_opposite.opposite = new_root_half_edge\n #\n # # Get the half edges from both networks for merging\n # first_net_inf_pole_prior = first_net_inf_pole_edge.prior\n # second_net_zero_pole_edge_prior = second_net_zero_pole_edge.prior\n #\n # # Merge the both networks so that the inf-pole from the first network is\n # # identified with the zero-pole from the second one. Handling different\n # # while merging the two networks.\n # first_net_inf_pole_edge.prior = second_net_zero_pole_edge_prior\n # second_net_zero_pole_edge_prior.next = first_net_inf_pole_edge\n #\n # first_net_inf_pole_prior.next = second_net_zero_pole_edge\n # second_net_zero_pole_edge.prior = first_net_inf_pole_prior\n #\n # # Update the node numbers in the second network zero-pole edges\n # half_edge_walker = first_net_inf_pole_prior.next\n # while half_edge_walker != first_net_inf_pole_prior:\n # half_edge_walker.node_nr = first_net_inf_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next\n #\n # # Check whether the original poles of the network that are merged are\n # # linked or not. If they are not linked then the corresponding half\n # # edges between them have to be removed.\n # if not n1.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # first_net_zero_pole_edge.remove()\n # first_net_inf_pole_edge.remove()\n #\n # if not n2.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # second_net_zero_pole_edge.remove()\n # second_net_inf_pole_edge.remove()\n #\n # # After a serial merge the poles are never linked.\n # res = Network(new_root_half_edge, is_linked=False,\n # l_size=new_l_size, u_size=new_u_size)\n # res.type = 'S'\n # return res",
"def _build_graphs(self):\n g1 = self._build_graph1()\n g2 = self._build_graph2(g1)\n return g1, g2",
"def jssim_dist(G1, G2, nodes=None):\n if nodes is None:\n nodes = G1.nodes() | G2.nodes()\n\n sims = []\n for n in nodes:\n set1, set2 = set(G1.neighbors(n)), set(G2.neighbors(n))\n neighbors = list(set1 | set2)\n p1 = np.array([1./len(set1) if _n in set1 else 0 for _n in neighbors])\n p2 = np.array([1./len(set2) if _n in set2 else 0 for _n in neighbors])\n sims.append(1 - jsdiv(p1, p2))\n \n return sims",
"def match_nodes(source_node, target_node):\n\n node_position = cmds.xform(source_node, q=True, ws=True, t=True)\n node_rotation = cmds.xform(source_node, q=True, ws=True, ro=True)\n cmds.xform(target_node, ws=True, t=node_position)\n cmds.xform(target_node, ws=True, ro=node_rotation)",
"def graph_union(*args, **kwargs):\n\n if not len(args) > 1:\n raise AttributeError('At least two input Graphs required')\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(*args)\n\n all_share_common_origin = all([share_common_origin(args[0], n) for n in args[1:]])\n if all_share_common_origin and not kwargs.get('return_copy', False):\n\n nids = []\n for graph in args:\n nids.extend([n for n in graph.nodes if n not in nids])\n\n eids = []\n for graph in args:\n eids.extend([e for e in graph.edges if e not in eids])\n\n result = args[0].origin.getnodes(nids)\n result.edges.set_view(eids)\n return result\n else:\n\n # make a deep copy of the first graph\n result = args[0].copy(deep=True, copy_view=False)\n\n # we need control over the node ID to add\n # temporary turn off auto_nid if needed\n auto_nid = result.data.auto_nid\n result.data.auto_nid = False\n\n for graph in args[1:]:\n for node, attrib in graph.nodes.items():\n if node not in result.nodes:\n result.add_node(node, **attrib)\n\n for edge, attrib in graph.edges.items():\n if edge not in result.edges:\n result.add_edge(*edge, **attrib)\n\n # Restore auto_nid\n result.data.auto_nid = auto_nid\n\n return result",
"def mix_graphs(source_graph1, source_graph2):\n g = clone_graph(source_graph1, identifier=source_graph1.identifier)\n g = clone_graph(source_graph2, target_graph=g)\n return g",
"def merge_networks_in_parallel(n1, n2):\n # This operation is not defined if both networks are linked.\n assert not (n1.is_linked and n2.is_linked), (n1, n2)\n\n if n1.is_linked:\n return merge_networks_in_parallel(n2, n1)\n\n # Either n2 is linked and n1 not or both are not linked.\n assert not n1.is_linked\n\n new_l_size = n1.l_size + n2.l_size\n new_u_size = n1.u_size + n2.u_size\n res_is_linked = n1.is_linked or n2.is_linked\n\n # Merge 0-poles.\n n1.zero_pole.insert_all_before(n2.zero_pole.prior)\n\n # Merge inf-poles.\n n1.inf_pole.insert_all_after(n2.inf_pole.next)\n\n # Remove the link edge in n1\n n1.zero_pole.remove()\n n1.inf_pole.remove()\n\n res = Network(n2.zero_pole, res_is_linked, new_l_size, new_u_size)\n res.type = 'P'\n return res\n\n # # Merge their 0-poles.\n # first_net_zero_pole_prior = first_net_zero_pole_edge.prior\n # second_net_zero_pole_next = second_net_zero_pole_edge.next\n # second_net_zero_pole_prior = second_net_zero_pole_edge.prior\n # first_net_zero_pole_edge.prior = second_net_zero_pole_prior\n # second_net_zero_pole_prior.next = first_net_zero_pole_edge\n # first_net_zero_pole_prior.next = second_net_zero_pole_next\n # second_net_zero_pole_next.prior = first_net_zero_pole_prior\n #\n # # Update the node numbers in the zero pole.\n # half_edge_walker = first_net_zero_pole_edge.next\n # while half_edge_walker != first_net_zero_pole_edge:\n # half_edge_walker.node_nr = first_net_zero_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next\n\n # # Merge their inf-poles\n # first_net_inf_pole_next = first_net_inf_pole_edge.next\n # second_net_inf_pole_prior = second_net_inf_pole_edge.prior\n # second_net_inf_pole_next = second_net_inf_pole_edge.next\n # first_net_inf_pole_edge.next = second_net_inf_pole_next\n # second_net_inf_pole_next.prior = first_net_inf_pole_edge\n # first_net_inf_pole_next.prior = second_net_inf_pole_prior\n # second_net_inf_pole_prior.next = first_net_inf_pole_next\n #\n # # Update the node numbers in the inf pole\n # half_edge_walker = first_net_inf_pole_edge.next\n # while half_edge_walker != first_net_inf_pole_edge:\n # half_edge_walker.node_nr = first_net_inf_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next",
"def merge(self, g1, g2):\n logger = logging.getLogger(__name__)\n \n \n g = BaseGraph()\n g.copy_graph_from(g1)\n\n plwn2sumo_dict = defaultdict(set)\n plwn2sumo_dict = self.get_plwn2sumo_dict()\n\n synset_on_vertex_dict = {}\n for node in g.all_nodes():\n synset_id = node.synset.synset_id\n if synset_id in synset_on_vertex_dict:\n logger.warning(\"ID of some synset is not unique.\")\n continue\n synset_on_vertex_dict[synset_id] = node\n\n num_of_edge = 0\n for edge in g2.all_edges():\n num_of_edge += 1\n logger.info(\"%d/%d\", num_of_edge, g2.num_edges())\n\n parent_sumo_concept = edge.source().sumo\n child_sumo_concept = edge.target().sumo\n\n if parent_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", parent_sumo_concept)\n continue\n if child_sumo_concept not in plwn2sumo_dict:\n logger.warning(\"The mapping file doesn't contain sumo concept '%s'.\", child_sumo_concept)\n continue\n\n for parent_syn_id in plwn2sumo_dict[parent_sumo_concept]:\n if parent_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", parent_syn_id)\n continue\n p_node = synset_on_vertex_dict[parent_syn_id]\n for child_syn_id in plwn2sumo_dict[child_sumo_concept]:\n if child_syn_id not in synset_on_vertex_dict:\n logger.warning(\"The mapping file contains synset '%d' that is not in the graph.\", child_syn_id)\n continue\n ch_node = synset_on_vertex_dict[child_syn_id]\n \n g.add_edge(p_node,\n ch_node,\n [(\"rel\", edge.rel)],\n simply=True)\n \n\n return g",
"def get_sim(file, graph1, graph2, pickle_name = \"\"):\r\n def build_sim(file, graph1, graph2):\r\n similarity = np.zeros((len(graph1), len(graph2)))\r\n if file.endswith(\"xz\"):\r\n with lzma.open(file, mode = 'rt') as f:\r\n for line in f:\r\n node_y, node_h, sim_val = line.strip().split(\" \") # 8 seconds\r\n try:\r\n node_y, node_h, sim_val = graph1.indexes[node_y], graph2.indexes[node_h], float(sim_val) #17 seconds\r\n similarity[node_y][node_h] = sim_val\r\n except:\r\n pass\r\n else:\r\n with open(file, mode = 'rt') as f:\r\n for line in f:\r\n node_y, node_h, sim_val = line.strip().split(\" \") # 8 seconds\r\n try:\r\n node_y, node_h, sim_val = graph1.indexes[node_y], graph2.indexes[node_h], float(sim_val) #17 seconds\r\n similarity[node_y][node_h] = sim_val\r\n except:\r\n pass\r\n return similarity\r\n \r\n #if \".sim.pickle\" == pickle_name:\r\n # pickle_name = graph1.name + graph2.name + pickle_name\r\n if pickle_name == \"\":\r\n pickle_name = graph1.name + graph2.name + \".sim.pickle\"\r\n try:\r\n with open(pickle_name,'rb') as f:\r\n return pickle.load(f)\r\n except FileNotFoundError as e:\r\n sims = build_sim(file, graph1, graph2)\r\n with open(pickle_name,'wb') as f:\r\n pickle.dump(sims,f)\r\n return sims",
"def synsets_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n # Compute similarity\n if len(synsets_sentence_1) != 0 and len(synsets_sentence_2) != 0:\n similarity = 1 - jaccard_distance(set(synsets_sentence_1), set(synsets_sentence_2))\n return similarity\n else:\n return 0",
"def nn_set2set_match(descs1, descs2):\n idxs = nn_set2set_match_cuda(descs1.unsqueeze(0).cuda(), descs2.unsqueeze(0).cuda()).detach().cpu().long()\n return idxs[0]",
"def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to correct nid from network1\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1",
"def joint_graph(graph, nodes):\n # TODO\n joint_graph = nodes = None\n\n return joint_graph, nodes",
"def test_graph2():\n mol_graph1 = DGLGraph([(0, 1), (0, 2), (1, 2)])\n mol_graph2 = DGLGraph([(0, 1), (1, 2), (1, 3), (1, 4)])\n batch_mol_graph = dgl.batch([mol_graph1, mol_graph2])\n node_feats = torch.arange(batch_mol_graph.number_of_nodes()).float().reshape(-1, 1)\n edge_feats = torch.arange(2 * batch_mol_graph.number_of_edges()).float().reshape(-1, 2)\n\n complete_graph1 = get_complete_graph(mol_graph1.number_of_nodes())\n complete_graph2 = get_complete_graph(mol_graph2.number_of_nodes())\n batch_complete_graph = dgl.batch([complete_graph1, complete_graph2])\n atom_pair_feats = torch.arange(batch_complete_graph.number_of_edges()).float().reshape(-1, 1)\n\n return batch_mol_graph, node_feats, edge_feats, batch_complete_graph, atom_pair_feats",
"def wordNet_similarity(sentence1, sentence2):\r\n # Tokenize and tag\r\n \r\n # sentence1 = pos_tag(word_tokenize(sentence1))\r\n sentence1=st_tagger.tag(word_tokenize(sentence1))\r\n \r\n # sentence2 = pos_tag(word_tokenize(sentence2))\r\n sentence2=st_tagger.tag(word_tokenize(sentence2))\r\n\r\n \r\n # Get the synsets for the tagged words\r\n #################################################\r\n\r\n # synsets1=[]\r\n # synsets2=[]\r\n # for tagged_word in sentence1:\r\n # print(tagged_word)\r\n # tagged_word = list(tagged_word)\r\n # synsets1.append(tagged_to_synset(tagged_word[0],tagged_word[1]))\r\n # for tagged_word in sentence2:\r\n # print(tagged_word)\r\n # tagged_word = list(tagged_word)\r\n # print(tagged_word)\r\n # synsets2.append(tagged_to_synset(tagged_word[0],tagged_word[1]))\r\n\r\n # The code above is the elaboration of code below\r\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\r\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\r\n \r\n # Filter out the Nones in the synonym set\r\n synsets1 = [ss for ss in synsets1 if ss]\r\n synsets2 = [ss for ss in synsets2 if ss]\r\n \r\n score, count = 0.0, 0\r\n \r\n###########################################################################\r\n # for syn1 in synsets1:\r\n # arr_simi_score = []\r\n # print('=========================================')\r\n # print(syn1)\r\n # print('----------------')\r\n # for syn2 in synsets2:\r\n # print(syn2)\r\n # simi_score = syn1.path_similarity(syn2)\r\n # print(simi_score)\r\n # if simi_score is not None:\r\n # arr_simi_score.append(simi_score)\r\n # print('----------------')\r\n # print(arr_simi_score)\r\n # if(len(arr_simi_score) > 0):\r\n # best = max(arr_simi_score)\r\n # print(best)\r\n # score += best\r\n # count += 1\r\n # # Average the values\r\n # print('score: ', score)\r\n # print('count: ', count)\r\n # score /= count\r\n\r\n###########################################################################\r\n\r\n for syn1 in synsets1:\r\n arr_simi_score = []\r\n # print('=========================================')\r\n print(\"Each word from Synonym se1\",syn1)\r\n # print('----------------')\r\n for syn2 in synsets2:\r\n print(\"Each word from Synonym se2\",syn2)\r\n # simi_score = syn1.path_similarity(syn2)\r\n simi_score = syn1.wup_similarity(syn2)\r\n print(\"word to word path_similarity score\",simi_score)\r\n if simi_score is not None:\r\n arr_simi_score.append(simi_score)\r\n print('----------------')\r\n print(arr_simi_score)\r\n if(len(arr_simi_score) > 0):\r\n best = max(arr_simi_score)\r\n print(\"best score so far\", best)\r\n score += best\r\n count += 1\r\n # Average the values\r\n print('score: ', score)\r\n print('count: ', count)\r\n if count!=0:\r\n score /= count\r\n else:\r\n score=0.0\r\n return score",
"def compare_trees(tree1, tree2):\n \tresponse = {}\n \tstart_time = time.time()\n \ttry:\t\n \t\ttns = dendropy.TaxonNamespace() \t\n \t\n \t\ttree_obj1 = dendropy.Tree.get(data=tree1, schema=\"newick\",taxon_namespace=tns)\n \t\ttree_obj2 = dendropy.Tree.get(data=tree2, schema=\"newick\",taxon_namespace=tns)\n\n \t\ttree_obj1.encode_bipartitions()\n \t\ttree_obj2.encode_bipartitions()\n\n \t\t#-----------------------------------------------------------\n \t\t#This method returns the symmetric distance between two trees. \n \t\t#The symmetric distance between two trees is the sum of the number of splits found in one of the trees but not the other. \n \t\t#It is common to see this statistic called the Robinson-Foulds distance\n\n \t\tareSame = True if treecompare.symmetric_difference(tree_obj1, tree_obj2) == 0 else False\n \t\tstatus = 200\n \t\tmessage = \"Success\"\n \t\tresponse['are_same_tree'] = areSame\n \n \texcept Exception, e:\n \t\tif \"Incomplete or improperly-terminated tree statement\" in str(e): #invalid: \"((A,B),C,D));\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderIncompleteTreeStatementError: \" + str(e)\n \t \t\tstatus = 400\n \t\telif \"Unbalanced parentheses at tree statement\" in str(e): #invalid: \"((A,B),(C,D);\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderMalformedStatementError: \"+str(e) \n \t \t\tstatus = 400\n \t\telif \"Multiple occurrences of the same taxa\" in str(e): #invalid: \"((A,B),(C,C));\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"NewickReaderDuplicateTaxonError: \"+str(e)\n \t \t\tstatus = 400\n \t\telif \"Unexpected end of stream\" in str(e): # invalid: \"((A,B),(C,D))\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"UnexpectedEndOfStreamError: \"+str(e)\n \t \t\tstatus = 400\n \t\telse:\n \t\t\tmessage = \"Error: Failed to compare trees. \"+str(e)\n \t \t\tstatus = 500\n \t \t\n \tresponse['status_code'] = status\n \tresponse['message'] = message\n\n \tend_time = time.time()\n \texecution_time = end_time-start_time\n #service result creation time\n \tcreation_time = datetime.datetime.now().isoformat()\n \tmeta_data = {'creation_time': creation_time, 'execution_time': float('{:4.2f}'.format(execution_time)), 'source_urls':[\"http://dendropy.org/library/treecompare.html#module-dendropy.calculate.treecompare\"] }\n\n \tresponse['meta_data'] = meta_data\n \tprint response\n \treturn response",
"def merge_nodes(self,n0,n1):\n # -- Sanity checks - does not yet allow for collapsing edges.\n\n # if they share any cells, would update the cells, but for now\n # just signal failure.\n n0_cells=list(self.node_to_cells(n0))\n n1_cells=list(self.node_to_cells(n1))\n cell_to_edge_cache={}\n\n for c in n1_cells:\n if c in n0_cells:\n print(\"cell %d common to both nodes\"%c)\n raise GridException(\"Not ready for merging nodes in the same cell\")\n # otherwise record and fix up below\n\n # while we're looping, cache the edges as they will\n # be mutated along the way.\n cell_to_edge_cache[c]=self.cell_to_edges(c).copy()\n\n # do they share an edge, but not already fixed in the above stanza?\n j=self.nodes_to_edge(n0,n1)\n if j is not None:\n raise GridException(\"Not ready for merging endpoints of an edge\")\n\n edge_map={} # index of superceded edge => superceding edge\n\n # Update edges of n1 to point to n0\n # if that would cause a duplicate edge, then the n1 version is deleted\n n1_edges=list(self.node_to_edges(n1)) # make copy since we'll mutate it\n for j in n1_edges:\n if self.edges['nodes'][j,0]==n1:\n nj=0\n elif self.edges['nodes'][j,1]==n1:\n nj=1\n else:\n assert False # sanity check\n newnodes=self.edges[j]['nodes'].copy()\n newnodes[nj]=n0\n # it's possible that this is an edge which already exists\n jother=self.nodes_to_edge(*newnodes)\n if jother is not None:\n # want to keep jother, delete j. but is there info on\n # cells which should be brought over?\n edge_map[j]=jother\n # wait to delete j until after cells have been moved to jother.\n else:\n self.log.debug(\"Modifying edge j=%d\"%j)\n self.modify_edge(j,nodes=newnodes)\n\n # -- Transition any cells. \n for c in n1_cells:\n # update the node list:\n cnodes=self.cell_to_nodes(c).copy()\n nc=list(cnodes).index(n1)\n cnodes[nc]=n0\n\n # Dangerous to use cell_to_edges, since it may\n # have to consult the edge topology, which is disrupted\n # in the above code. \n # cell_to_edges: first checks cells['edges'], may \n # go to cell_to_nodes(c): that's safe.\n # and nodes_to_edge\n # -> node_to_edges, which in turn may consult self.edges['nodes']\n\n #cedges=self.cell_to_edges(c).copy()\n cedges=cell_to_edge_cache[c]\n\n for ji,j in enumerate(cedges):\n if j in edge_map:\n # is there were edges['cells'] should be updated?\n\n # sever the edge=>cell pointer, to p\n # could just set to [-1,-1], but this keeps things very explicit\n # for debugging\n j_cells=list(self.edges['cells'][j])\n j_cells_side=j_cells.index(c)\n j_cells[ j_cells_side ] = -1\n self.modify_edge(j,cells=j_cells)\n\n # and modify the receiving edge, too\n jo=edge_map[j]\n jo_cells=list(self.edges['cells'][jo])\n # which side of jo? a bit tedious...\n if list(self.edges['nodes'][j]).index(n1) == list(self.edges['nodes'][jo]).index(n0):\n # same orientation\n jo_cells_side=j_cells_side\n elif list( self.edges['nodes'][j]).index(n1) == 1-list(self.edges['nodes'][jo]).index(n0):\n jo_cells_side=1-j_cells_side\n else:\n raise Exception(\"Failed in some tedium\")\n assert jo_cells[jo_cells_side]<0\n jo_cells[jo_cells_side]=c\n self.modify_edge(edge_map[j],cells=jo_cells)\n # yikes. any chance that worked?\n\n cedges[ji]=edge_map[j]\n\n # maybe this is where we'd update cells['edges'] too?\n self.modify_cell(c,nodes=cnodes,edges=cedges)\n\n for dead_edge in edge_map:\n self.delete_edge(dead_edge)\n\n self.delete_node(n1)",
"def make_node_dict(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n self.node_dict1 = {}\n for node in self.input1['knowledge_graph']['nodes']:\n self.node_dict1[node['id']] = node\n self.node_dict2 = {}\n for node in self.input2['knowledge_graph']['nodes']:\n self.node_dict2[node['id']] = node",
"def connecting(node1, node2):\n comp_list = []\n \"\"\":type : list[components.Component]\"\"\"\n if node1 == node2:\n return []\n for comp in node1.connected_comps:\n if comp.neg == node2:\n comp_list.append(comp)\n elif comp.pos == node2:\n comp_list.append(comp)\n return comp_list",
"def conversion(data_path, nodes, simtime, resolution=1):\r\n\r\n receivers = set() # i.e. neurons; senders can be external inputs\r\n edges_set = set()\r\n\r\n # Ask number of neurons per node\r\n nodes_dict = dict()\r\n if input(\"Every node has same number of neurons? (y/n)\") == \"y\":\r\n n = int(input(\"Number of neurons per node: \"))\r\n for node in range(nodes):\r\n nodes_dict[node] = n\r\n receivers.update([str(node) + \"-\" + str(neuron) for neuron in range(nodes_dict[node])])\r\n else:\r\n for node in range(nodes):\r\n nodes_dict[node] = int(input(\"Number of neurons in node %s: \" % node))\r\n receivers.update([str(node) + \"-\" + str(neuron) for neuron in range(nodes_dict[node])])\r\n\r\n receivers = sorted(list(receivers))\r\n\r\n # Load a part of FNS data to gather information about edges\r\n df = pd.read_csv(data_path, header=None, usecols=[0, 1, 2, 3, 4, 5], chunksize=500000, nrows=2000000)\r\n for chunk in df:\r\n chunk.columns = [\"Burning Time\", \"Firing Node\", \"Firing Neuron\", \"Burning Node\", \"Burning Neuron\",\r\n \"External Source\"]\r\n print(chunk.index)\r\n for node in range(nodes):\r\n temp = chunk.loc[\r\n (chunk[\"Burning Node\"] == node) & (chunk[\"External Source\"] == False)] # Remove external inputs\r\n for i in range(len(temp)):\r\n print(\"Gathering edge information for node: %i/%i. Connection: %i/%i\" % (\r\n node + 1, nodes, i + 1, len(temp)), end=\"\\r\")\r\n sender = str(np.asarray(temp[\"Firing Node\"])[i]) + \"-\" + str(np.asarray(temp[\"Firing Neuron\"])[i])\r\n receiver = str(node) + \"-\" + str(np.asarray(temp[\"Burning Neuron\"])[i])\r\n edges_set.add((sender, receiver))\r\n print(\"Gathering edge information for node: %i/%i. Connection: %i/%i\" % (node + 1, nodes, i + 1, len(temp)))\r\n\r\n status_table = pd.DataFrame(np.zeros((len(receivers), len(range((simtime - 1) * resolution)))),\r\n columns=list(range((simtime - 1) * resolution)), index=receivers)\r\n\r\n df = pd.read_csv(data_path, header=None, usecols=[0, 1, 2, 3, 4, 5], chunksize=1000000)\r\n for chunk in df:\r\n chunk.columns = [\"Burning Time\", \"Firing Node\", \"Firing Neuron\", \"Burning Node\", \"Burning Neuron\",\r\n \"External Source\"]\r\n print(chunk.index)\r\n\r\n # Loop over times instead of receivers: slightly better efficiency for large datasets\r\n times_raw = sorted(set(chunk[\"Burning Time\"]))\r\n status_table_temp = pd.DataFrame(columns=receivers)\r\n status_table_aux = pd.DataFrame(np.zeros((len(receivers), len(range((simtime - 1) * resolution)))),\r\n columns=list(range((simtime - 1) * resolution)), index=receivers)\r\n\r\n times = list(np.arange(0, int(max(times_raw) + 1), 1 / resolution))\r\n min_t = int(np.trunc(min(chunk[\"Burning Time\"])))\r\n max_t = int(np.trunc(max(chunk[\"Burning Time\"])))\r\n\r\n for t in times[min_t:max_t]:\r\n print(\"Gathering nodes' activity dynamics - t: %i/%i\" % (t + 1, max_t), end=\"\\r\")\r\n temp = chunk.loc[np.trunc(chunk[\"Burning Time\"]) == t, (\"Burning Node\", \"Burning Neuron\")]\r\n # Count burning events for each neuron at time t\r\n count = Counter([str(node) + \"-\" + str(neuron) for node, neuron in np.asarray(temp)])\r\n # Every time step we add a Counter dict to fill status table\r\n status_table_temp = status_table_temp.append(count, ignore_index=True)\r\n print(\"Gathering nodes' activity dynamics - t: %i/%i\" % (t + 1, max_t))\r\n\r\n # status_table_temp contains just a limited space of time each chunk\r\n status_table_temp = status_table_temp.transpose()\r\n status_table_temp.columns = list(\r\n 
np.arange(start=int(np.trunc(min(chunk[\"Burning Time\"]))), stop=int(np.trunc(max(chunk[\"Burning Time\"])))))\r\n status_table_temp = status_table_temp.fillna(0)\r\n\r\n # status_table_aux contains all time steps with temp table values and 0s for the rest each chunk\r\n status_table_aux = status_table_aux.add(status_table_temp)\r\n status_table_aux = status_table_aux.fillna(0)\r\n\r\n # status_table merges all chunks' data\r\n status_table = status_table.add(status_table_aux)\r\n status_table = status_table.fillna(0)\r\n\r\n timestamp = datetime.now()\r\n new_dir = \"gephiFiles\" + timestamp.strftime(\"d%d_%m_%Y-t%H_%M_%S\")\r\n os.mkdir(new_dir)\r\n\r\n # Generate gephi compliant files\r\n t = \"<\" + str(times) + \">\"\r\n gephi_nodes = pd.DataFrame(columns=[\"id\", \"label\", \"timeset\", \"events\", \"node\"])\r\n for idx, node in enumerate(receivers):\r\n print(\"Writing nodes' file for Gephi: %i/%i\" % (idx, len(receivers)), end=\"\\r\")\r\n events_row = [[float(i), int(events)] for i, events in enumerate(status_table.loc[node])]\r\n events_row = str(events_row).replace(\"[[\", \"<[\").replace(\"]]\", \"]>\").replace(\"],\", \"];\")\r\n new_row = pd.Series([node, node, t, events_row, node.split(\"-\")[0]], index=gephi_nodes.columns)\r\n gephi_nodes = gephi_nodes.append(new_row, ignore_index=True)\r\n print(\"Writing nodes' file for Gephi: %i/%i\" % (idx, len(receivers)))\r\n gephi_nodes.to_csv(new_dir + \"/gephi_nodes.csv\", index=False)\r\n\r\n print(\"Compute gephi files with %i edges will last %0.2fm approx.\" % (len(edges_set), len(edges_set) / 12000))\r\n if input(\"Do you want to proceed? (y/n) \") == \"n\":\r\n exit()\r\n gephi_edges = pd.DataFrame(columns=[\"Source\", \"Target\", \"type\", \"id\", \"weight\"])\r\n for idx, edge in enumerate(edges_set):\r\n print(\"Writing edges' file for Gephi: %i/%i\" % (idx, len(edges_set)), end=\"\\r\")\r\n edge_row = pd.Series([edge[0], edge[1], \"Directed\", idx, 1], index=gephi_edges.columns)\r\n gephi_edges = gephi_edges.append(edge_row, ignore_index=True)\r\n print(\"Writing edges' file for Gephi: %i/%i\" % (idx, len(edges_set)))\r\n gephi_edges.to_csv(new_dir + \"/gephi_edges.csv\", index=False)\r\n\r\n return None",
"def _computeoutgoing(repo, heads, common):\n cl = repo.changelog\n if common:\n hasnode = cl.hasnode\n common = [n for n in common if hasnode(n)]\n else:\n common = [repo.nullid]\n if not heads:\n heads = cl.heads()\n return discovery.outgoing(repo, common, heads)",
"def _match_identical_nodes(self):\n\n for job_name_b in self._topo_b_nodes:\n for job_name_a in self._unresolved_a_nodes:\n if self._is_node_identical(job_name_a, job_name_b):\n self._identical_nodes[job_name_b] = job_name_a\n self._unresolved_a_nodes.remove(job_name_a)\n self._unresolved_b_nodes.remove(job_name_b)\n break",
"def compose_rewire(phi1, phi2):\n # cannot compose when dimensions are wrong\n assert phi1.indim == phi2.outdim\n\n # it does not make sense to compose with phi1 a variable\n assert not (isinstance(phi1, amnet.Variable))\n\n # compute the list of descendants of phi1 and phi2\n desc1 = descendants(phi1)\n desc2 = descendants(phi2)\n\n # the trees should have no overlaps\n nodeids1 = set([id(d) for d in desc1])\n nodeids2 = set([id(d) for d in desc2])\n assert len(nodeids1) == len(desc1)\n assert len(nodeids2) == len(desc2)\n assert len(nodeids1 & nodeids2) == 0\n\n # determine the variables x1, x2 associated with phi1, phi2\n vars1 = [d for d in desc1 if isinstance(d, amnet.Variable)]\n vars2 = [d for d in desc2 if isinstance(d, amnet.Variable)]\n assert len(vars1) == 1\n assert len(vars2) == 1\n x1 = vars1[0]\n x2 = vars2[0]\n\n # TODO: rewire here"
]
| [
"0.64068115",
"0.60855806",
"0.602546",
"0.60104007",
"0.5977929",
"0.5904355",
"0.57879174",
"0.5684651",
"0.5627905",
"0.56069714",
"0.55558586",
"0.5541558",
"0.5518867",
"0.5504052",
"0.5490307",
"0.5487288",
"0.54431766",
"0.5415038",
"0.5405841",
"0.53724426",
"0.5337163",
"0.53307253",
"0.530864",
"0.52931523",
"0.5266396",
"0.5254309",
"0.5240701",
"0.5203456",
"0.52033854",
"0.51933986"
]
| 0.72850496 | 0 |
Counts the number of common (dependency parse) subgraphs rooted at n1 and n2. This is an implementation of Cm(n1, n2) for dependency structures from Collins and Duffy (2001), "Parsing with a Single Neuron". | def count_common_subgraphs(graph1, graph2, n1, n2,
node_attrib='label', edge_attrib='label'):
for graph in (graph1, graph2):
assert nx.is_directed_acyclic_graph(graph)
if graph1.node[n1][node_attrib] != graph2.node[n2][node_attrib]:
return 0
n1_children = dependency_children(graph1, n1, edge_attrib=edge_attrib)
n2_children = dependency_children(graph2, n2, edge_attrib=edge_attrib)
if not n1_children or not n2_children:
return 0
else:
result = 1 # neutral element of multiplication
for n1_target, n2_target in common_dependency_targets(graph1, graph2, n1, n2,
node_attrib=node_attrib):
result *= (count_common_subgraphs(graph1, graph2,
n1_target, n2_target,
                                              node_attrib=node_attrib,
                                              edge_attrib=edge_attrib) + 2)
return result - 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def common_count(self, node_1, node_2):\n return int(len(set(nx.neighbors(self.graph, node_1)).intersection(set(nx.neighbors(self.graph, node_2)))))",
"def countComponents(self, n: int, edges: List[List[int]]) -> int:\n # BFS O_n time and space\n \n # union find ALG\n uf = UnionFind(n)\n \n for x, y in edges:\n uf.union(x, y)\n \n return len(set(uf.find(x) for x in range(n)))",
"def _num_conn_comp(graph):\n\n return nx.number_connected_components(graph)",
"def countNodes(epr):\n result = 1\n argLst = epr.args\n for arg in argLst:\n result += countNodes(arg)\n return result",
"def count_matching_genes(genome1, genome2):\n count = 0\n\n inno1 = max(genome1.nodes.keys())\n inno2 = max(genome2.nodes.keys())\n\n for i in range(max(inno1, inno2) + 1):\n n1 = genome1.nodes.get(i, None)\n n2 = genome2.nodes.get(i, None)\n if not (n1 is None or n2 is None):\n count += 1\n\n inno1 = max(genome1.connections.keys())\n inno2 = max(genome2.connections.keys())\n\n for i in range(max(inno1, inno2) + 1):\n c1 = genome1.connections.get(i, None)\n c2 = genome2.connections.get(i, None)\n if not (c1 is None or c2 is None):\n count += 1\n\n return count",
"def estimate_nc(self):\n mol = self.m\n torsma = '[!$(*#*)&!D1]~[!$(*#*)&!D1]'\n q = Chem.MolFromSmarts(torsma)\n matches = mol.GetSubstructMatches(q)\n nmat = len(matches)\n #torsions = []\n\n # since mostly the molecules concerned here are amons\n # with N_I <=7, we care about 3- to 7-membered rings\n atsr = _get_ring_nodes(mol,3,7,F)\n #print ' -- atsr = ', atsr\n inrs = np.zeros(self.na, dtype=int) # [this atom is] in [how many] number of rings\n for ia in self.ias_heav:\n _sets = []\n for _ats in atsr:\n if ia in _ats:\n _sets.append(_ats)\n #print ' -- ia, _sets = ', ia, _sets\n inr = find_number_of_unique_set(_sets)\n inrs[ia] = inr\n #print ' -- inrs = ', inrs\n if nmat == 0:\n ns = [1]\n if self.debug: print(' |__ ns = ', ns)\n nc = 1\n self.nc = nc\n else:\n ns = []; patts = []\n scale = 0\n for match in matches:\n j = match[0]\n k = match[1]\n cb = set([j,k])\n bond = mol.GetBondBetweenAtoms(j, k)\n aj = mol.GetAtomWithIdx(j)\n ak = mol.GetAtomWithIdx(k)\n hj, hk = [ _hyb[_a.GetHybridization()] for _a in [aj,ak] ]\n iok1 = (hj != 2); iok2 = (hj != 3)\n iok3 = (hk != 2); iok4 = (hk != 3)\n if (iok1 and iok2) or (iok3 and iok4): continue\n\n # do not allow internal rotation about two adjacent sp2 atoms are in a ring\n if inrs[j] and inrs[k] and hj==2 and hk==2: continue\n\n pjk = []\n jk = [j,k]\n hsjk = [hj,hk]\n for _ in range(2):\n ia1 = jk[_]\n ia2 = j if ia1==k else k\n hyb = hsjk[_]\n nbrs = np.setdiff1d(self.ias[self.bom[ia1]>0], [ia2])\n ihs = (self.zs[nbrs]==1)\n if np.all(ihs): # case 'a', e.g., 'a1','a2','a3'\n # check ~X-CH3, ~X-NH2, ...\n nh = len(ihs)\n if hyb==3:\n # for rotor X-C in ~X-CH3, one torsion is allowed\n sn = {1:'a3', 2:'a2', 3:'a1'}[nh]\n else: # hyb==2\n sn = {1:'a2', 2:'a1', 3:'a1'}[nh]\n else: # case 'b', e.g., 'b1','b2','b3'\n inr = inrs[ia1]\n if self.cns[ia1]==2 and inr: # e.g., O<, S<, Se<,\n sn = 1\n else:\n if hyb==3:\n sn = 2 if inr <= 1 else 1 # {0:'b3', 1:'b3', 2:'b2', 3:'b1', 4:'b1'}[inr]\n else: # hyb==2:\n sn = 'b2' if inr == 0 else 'b1'\n #sn = {0:'b2', 1:'b1', 2:'b1', 3:'b1'}[inr]\n _patt = '%d%s'%(hyb,sn)\n pjk.append(_patt)\n #print 'j,k = ', j,k, ', pjk = ', pjk\n nci = min([ int(patt[-1]) for patt in pjk ]) # ndic[patt]; sci = scdic[patt]\n if nci > 1:\n ns.append( nci )\n if not np.any([inrs[j],inrs[k]]):\n scale += 1\n if scale == 0: scale = 1\n nc = np.int(np.floor(np.product(ns))) * scale #* 2\n self.nc = nc if nc > 99 else 99\n if self.debug: print(' |__ ns = ', ns)\n if self.debug: print(' |__ scale = %d, nc = %d'%(scale, nc))\n self.ns = np.array(ns, np.int)",
"def __init__(self, graphs: List[Graph], graph_ids: Set[str]) -> None:\n self.graph_ids = graph_ids\n\n # count of link given source & object\n self.c_l_given_so: Dict[Tuple[bytes, bytes], Dict[bytes, int]] = {}\n # count of nodes\n self.c_n: Dict[bytes, int] = {}\n # count of link given source\n self.c_l_given_s: Dict[bytes, Dict[bytes, int]] = {}\n\n # COMPUTE counting\n for g in graphs:\n for link in g.iter_links():\n s = link.get_source_node().label\n o = link.get_target_node().label\n\n # COMPUTE c_l_given_s\n if s not in self.c_l_given_s:\n self.c_l_given_s[s] = {}\n if link.label not in self.c_l_given_s[s]:\n self.c_l_given_s[s][link.label] = 0\n self.c_l_given_s[s][link.label] += 1\n\n # COMPUTE c_l_given_so\n if link.get_target_node().is_data_node():\n # no need to estimate this prob, since it will be result from semantic labeling\n pass\n else:\n if (s, o) not in self.c_l_given_so:\n self.c_l_given_so[(s, o)] = {}\n if link.label not in self.c_l_given_so[(s, o)]:\n self.c_l_given_so[(s, o)][link.label] = 0\n self.c_l_given_so[(s, o)][link.label] += 1\n\n # COMPUTE c_n\n for n in g.iter_nodes():\n if n.label not in self.c_n:\n self.c_n[n.label] = 0\n self.c_n[n.label] += 1\n\n # cached\n self.p_critical_l_given_s = {}\n for s, counts in self.c_l_given_s.items():\n l, c_l = max(counts.items(), key=lambda x: x[1])\n self.p_critical_l_given_s[s] = (l, c_l / self.c_n[s])",
"def countMatches(g1, g2):\n if g1 is None or g2 is None or len(g1) == 0 or len(g1[0]) == 0: # sanity check\n return 0\n count = 0\n for i in range(len(g1)):\n for j in range(len(g1[0])):\n if g1[i][j] == g2[i][j] == 1 and search_grid(g1, g2, i, j):\n count = count + 1\n return count",
"def cyclomaticComplexity (self):\n self.tarjan()\n return len(self.__edges) - len(self.__nodes) + 2 * len(self.__scc)",
"def solve_part_two(wire_one_map, wire_two_map):\n return min([combined_step_count(intersection_coords, wire_one_map, wire_two_map) for intersection_coords in find_intersection(wire_one_map, wire_two_map)])",
"def num_of_subgraphs(self):\n \n G = self.to_undirected_graph()\n \n count = G.num_subgraph()\n \n print('The number of disconnected components in the graph is ', count)",
"def get_co_occurrences(self, word1, word2):\n raise NotImplementedError(\"Word2Vec model does not support co-occurrence counting\")",
"def count_disjoint_genes(genome1, genome2):\n count = 0\n\n inno1 = max(genome1.nodes.keys())\n inno2 = max(genome2.nodes.keys())\n\n for i in range(max(inno1, inno2) + 1):\n n1 = genome1.nodes.get(i, None)\n n2 = genome2.nodes.get(i, None)\n if (n1 is None and inno1 > i and n2 is not None) or (\n n2 is None and inno2 > i and n1 is not None\n ):\n count += 1\n\n inno1 = max(genome1.connections.keys())\n inno2 = max(genome2.connections.keys())\n\n for i in range(max(inno1, inno2) + 1):\n c1 = genome1.connections.get(i, None)\n c2 = genome2.connections.get(i, None)\n if (c1 is None and inno1 > i and c2 is not None) or (\n c2 is None and inno2 > i and c1 is not None\n ):\n count += 1\n\n return count",
"def calc_process_cohesion(partitions, graph):\n ch = 0\n for part in partitions:\n crc = calc_community_relation_cohesion(part, graph)\n cic = calc_community_information_cohesion(part, graph)\n ch = ch + (crc * cic)\n ch = ch / len(partitions)\n return ch",
"def number_cross_links(self, node_list1, node_list2):\n if self.directed:\n raise NetworkError(\"Not implemented yet...\")\n\n return self.cross_adjacency(node_list1, node_list2).sum()",
"def common_dependency_targets(graph1, graph2, n1, n2, node_attrib='label',\n edge_attrib='label'):\n n1_children = dependency_children(graph1, n1, edge_attrib=edge_attrib)\n n2_children = dependency_children(graph2, n2, edge_attrib=edge_attrib)\n n1_rels, n2_rels = defaultdict(list), defaultdict(list)\n\n for source_set, target_dict in ((n1_children, n1_rels), (n2_children, n2_rels)):\n for rel, target in source_set:\n target_dict[rel].append(target)\n\n common_rels = set(n1_rels) & set(n2_rels) # intersection\n common_deps = set()\n for rel in common_rels:\n for n1_target in n1_rels[rel]:\n n1_target_word = graph1.node[n1_target][node_attrib]\n for n2_target in n2_rels[rel]:\n n2_target_word = graph2.node[n2_target][node_attrib]\n if n1_target_word == n2_target_word:\n common_deps.add( (n1_target, n2_target) )\n return common_deps",
"def nsi_cross_closeness_centrality(self, node_list1, node_list2):\n shortest_paths = self.path_lengths()\n node_weights = self.node_weights\n\n nsi_shortest_paths = shortest_paths + np.eye(len(shortest_paths))\n nsi_shortest_paths[np.isinf(nsi_shortest_paths)] = self.N - 1\n\n nsi_cross_paths = nsi_shortest_paths[node_list1, :][:, node_list2]\n W = sum(node_weights[node_list2])\n return W / np.dot(nsi_cross_paths, node_weights[node_list2])",
"def test_count_neighbors(self):\n m, n = 5, 5\n k, p = 0.2, 0.7\n agents = [ConwayAgent(ii, ii & 0x1 == 1) for ii in range(m * n)]\n C = ConwayModel(m, n, k, p, agents)\n\n to_count = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n expected = np.array([[1, 1, 2], [2, 3, 1], [0, 2, 1]])\n result = C.count_neighbors(to_count)\n self.assertTrue(np.all(expected == result))",
"def findComponents(M: List[List[int]]) -> int:\n G = buildGraph(M)\n U = list(range(len(M)))\n n_components = 0\n while U:\n n_components += 1\n [G, U] = DFS(G, U[0], U)\n return n_components",
"def calc_cohesion( g, sg0, sg1, max_csize ) :\n score = 0.0\n n0 = len( sg0 )\n n1 = len( sg1 )\n if (n0 + n1 <= max_csize) :\n boundary_edges = networkx.edge_boundary( g, sg0, sg1 )\n for e in boundary_edges :\n score += g[e[0]][e[1]][\"similarity\"]\n return score / max( n0, n1 )",
"def lca(root, node1, node2):\n\n def lca_helper(root, node1, node2):\n \"\"\"\n Returns: [num_target_nodes, ancestor]\n\n \"\"\"\n if root is None:\n return [0, None]\n\n left_result = lca_helper(root.left, node1, node2)\n if left_result[0] == 2:\n return left_result\n right_result = lca_helper(root.right, node1, node2)\n if right_result[0] == 2:\n return right_result\n\n num_target_nodes = (\n left_result[0] + right_result[0] + (node1, node1).count(root)\n )\n\n return [num_target_nodes, root if num_target_nodes == 2 else None]\n\n return lca_helper(root, node1, node2)[1]",
"def count_common_connections(network, user_A, user_B):\n count = 0\n if user_A not in network or user_B not in network:\n return False\n for person in network[user_A][0]:\n if person in network[user_B][0]:\n count += 1\n return count",
"def overlap(g, node_1, node_2):\n inter = len(set(nx.neighbors(g, node_1)).intersection(set(nx.neighbors(g, node_2))))\n return float(inter)",
"def test_maximum_common_subgraph(graph1, graph2, attrs):\n expected = vermouth.graph_utils.categorical_maximum_common_subgraph(graph1, graph2, attrs)\n\n found = vermouth.graph_utils.maximum_common_subgraph(graph1, graph2, attrs)\n\n note((\"Attributes that must match\", attrs))\n note((\"Graph 1 nodes\", graph1.nodes(data=True)))\n note((\"Graph 1 edges\", graph1.edges))\n note((\"Graph 2 nodes\", graph2.nodes(data=True)))\n note((\"Graph 2 edges\", graph2.edges))\n # We don't find all MCS'es. See comment in\n # vermouth.graph_utils.maximum_common_subgraph\n found = make_into_set(found)\n expected = make_into_set(expected)\n\n if found == expected:\n event(\"Exact match\")\n assert found <= expected",
"def hypergraph_common_edges(u, v, hypergraph):\n total = 0\n for e in hypergraph.edges():\n if u in e.elements and v in e.elements:\n total += 1\n return total",
"def count_common_connections(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n common_connections = 0\n for conn in network[user_A]['connections']:\n if conn in network[user_B]['connections']:\n common_connections += 1\n return common_connections",
"def part_two(forms: str) -> int:\n\n count = 0\n groups = parse_forms(forms)\n for members in groups:\n answers = None\n for member in members:\n if answers is None:\n answers = set(member)\n answers.intersection_update(set(member))\n count += len(answers)\n return count",
"def tot_num_of_progenitors_at_z(self, SH, mtree, z1, z2):\n \n for ss in range(z1, z2+1):\n print('redshift:', ss)\n # nodes at redshift ss\n ss_indx = np.where(mtree.data.snapshotNumber.values == ss)\n nodeID = mtree.data.index.values[ss_indx]\n nodeID_desc = mtree.data.descendantIndex.values[ss_indx]\n \n # find number of progenitors for nodes at redshift ss\n if ss != z1:\n progcounts = np.zeros(len(nodeID), dtype=int)\n for ii in range(len(nodeID_past_desc)):\n if nodeID_past_desc[ii] in nodeID:\n indx = np.where(nodeID == nodeID_past_desc[ii])\n progcounts[indx] = count[ii]\n\n nodeID_desc_unique, count = np.unique(nodeID_desc, return_counts=True)\n nodeID_desc_unique=nodeID_desc_unique[1:]; count=count[1:]\n \n # add progenitors of progenitors\n if ss != z1:\n for ii in range(len(nodeID)):\n if progcounts[ii] > 1:\n indx = np.where(nodeID_desc_unique == nodeID_desc[ii])\n count[indx] += progcounts[ii] - 1\n\n nodeID_past = nodeID\n nodeID_past_desc = nodeID_desc_unique\n return nodeID, progcounts",
"def _count_concordant_pairs(preds: Tensor, target: Tensor) ->Tensor:\n return torch.cat([_concordant_element_sum(preds, target, i) for i in range(preds.shape[0])]).sum(0)",
"def neigh_comm(n):\n\n nc = {bl[n]: 0.0}\n for idx in range(0, node_count):\n neigh = idx\n ncomm = bl[neigh]\n nei_weight = network[n][idx]\n if (neigh != n) & (nei_weight > 0.0):\n if ncomm in nc:\n nc[ncomm] += nei_weight\n else:\n nc[ncomm] = nei_weight\n return nc"
]
| [
"0.714576",
"0.6026852",
"0.59091425",
"0.5810028",
"0.5807212",
"0.5753279",
"0.5661298",
"0.56599116",
"0.56133384",
"0.559882",
"0.5597078",
"0.5583921",
"0.55722636",
"0.55678254",
"0.554752",
"0.5523636",
"0.5515944",
"0.55092716",
"0.55087376",
"0.54997736",
"0.5478763",
"0.54719955",
"0.5444225",
"0.5435276",
"0.54292583",
"0.54068696",
"0.540607",
"0.53985256",
"0.5393789",
"0.53734255"
]
| 0.7671494 | 0 |
Given a graph, returns a set of its dependency rules. If root_node is given, returns only those rules from the subgraph rooted at that node. A dependency rule is represented by a (source node label, edge/relation label, target node label) triple, e.g. ('woman', 'dt', 'the'). Returns | def get_dependency_rules(graph, root_node=None,
node_attrib='label', edge_attrib='label'):
rules = set()
if not root_node:
# root node is the first element in a topological sort of the graph
root_node = nx.topological_sort(graph)[0]
for source, target in nx.dfs_edges(graph, root_node):
rules.add( (ensure_utf8(graph.node[source].get(node_attrib, source)),
ensure_utf8(graph[source][target].get(edge_attrib, '')),
ensure_utf8(graph.node[target].get(node_attrib, target))) )
return rules | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dependencies(graph: Graph, node: Node):\n dependencies: Set[Node] = set()\n def traverse_nodes(nodes):\n for candidate in nodes:\n if candidate not in dependencies:\n dependencies.add(candidate)\n traverse_nodes(graph[candidate])\n traverse_nodes(graph[node])\n dependencies.discard(node)\n return dependencies",
"def includes_all_subgraph_rules(graph, subgraph_candidate,\n subgraph_root_node=None,\n node_attrib='label', edge_attrib='label'):\n graph_rules = get_dependency_rules(graph, node_attrib=node_attrib,\n edge_attrib=edge_attrib)\n subgraph_rules = get_dependency_rules(subgraph_candidate,\n root_node=subgraph_root_node,\n node_attrib=node_attrib,\n edge_attrib=edge_attrib)\n return all(sg_rule in graph_rules for sg_rule in subgraph_rules)",
"def get_dependency_subgraphs(graph, node_attrib='label', edge_attrib='label'):\n assert nx.is_directed_acyclic_graph(graph)\n for n in xrange(graph.number_of_nodes()):\n for subnodes in itertools.combinations(graph.nodes(), n+1):\n subgraph_candidate = graph.subgraph(subnodes)\n if is_dependency_subgraph(graph, subgraph_candidate,\n node_attrib=node_attrib,\n edge_attrib=edge_attrib):\n yield subgraph_candidate",
"def get_dependencies(self, target, graph, dep_list):\n \n if graph == OrderedDict(): return\n if target in graph:\n dep_list.append(graph)\n return dep_list\n for key in graph:\n self.get_dependencies(target, graph[key], dep_list)\n return dep_list",
"def legalize_graph(gm: pippy.fx.GraphModule) -> pippy.fx.GraphModule:\n indeg = {node: 0 for node in gm.graph.nodes}\n new_graph = pippy.fx.Graph()\n # Track how many unfulfilled dependencies each node has\n for node in gm.graph.nodes:\n for user in node.users:\n indeg[user] += 1\n queue: collections.deque = collections.deque()\n # Add all nodes with no dependencies to the queue\n for node in gm.graph.nodes:\n if indeg[node] == 0:\n queue.append(node)\n env: Dict[pippy.fx.Node, pippy.fx.Node] = {}\n # Pop nodes from the queue, and add nodes that have had all their\n # dependencies fulfilled\n while len(queue) > 0:\n cur = queue.popleft()\n env[cur] = new_graph.node_copy(cur, lambda x: env[x])\n for user in cur.users:\n indeg[user] -= 1\n if indeg[user] == 0:\n queue.append(user)\n # If the new graph's size is not as large as the old one, then there must be\n # a cycle (i.e. some node's dependencies were not satisfied.)\n if len(new_graph.nodes) < len(gm.graph.nodes):\n raise RuntimeError(f\"Input graph has cycles, unable to add {[node for node in indeg if indeg[node] != 0]}\")\n gm.graph = new_graph\n return gm",
"def find_all_cycles(s,graph):\n\n grph = u.edge_to_list_dict(graph)\n node_cnt = len(grph)\n k = z.Int(\"k\")\n syms = [z.Int('node%s'%i) for i in range(node_cnt)]\n\n # s.add(syms[0] == 0) # start node is a 0\n s.add(k < node_cnt)\n s.add(k > 1)\n\n o = z.Optimize()\n\n # for source, sinks in sgraph.s_adj_list():\n for i in range(node_cnt):\n s.add(syms[i] >= 0)\n s.add(syms[i] <= k)\n s.add(z.Or([syms[j] == ((syms[i] + 1) % k) for j in grph[i]]) == (syms[i] == 0))\n\n\n r = []\n m = []\n\n # o.minimize(z.Sum([syms[i] for i in range(node_cnt)]))\n s.add(z.Product([syms[i] for i in range(node_cnt)]) == 0)\n done = False\n while not done:\n if s.check() == z.sat:\n m = s.model()\n r.append(m)\n s.add(k != m[k])\n else:\n done = True\n\n return r",
"def fetchRulesForNode(self, nodeId):\n rules = []\n self.mapNodeDependentSites[nodeId] = set([])\n exactlyMatched = self.findExactlyMatchingRules(nodeId)\n rules.extend(exactlyMatched)\n mergedMatched = self.findMergedMatchingRules(nodeId)\n rules.extend(mergedMatched)\n if len(self.tree.node(nodeId)) > 12:\n rules.extend(self.findDepravedMatchingRules(nodeId))\n # HACK: 2012/10/22\n # elif not mergedMatched and exactlyMatched and len(exactlyMatched) <= 1:\n # if exactlyMatched[0][2][2] < -3: # log(0.05)\n # # Clear rules in this bad situtation.\n # rules = []\n\n # Allow no rules to return, then the decoder will be forced to\n # build translation using CYK.\n if not rules:\n return None, {}\n # if not rules:\n # rules.extend(self.findRecontructMatchingRules(nodeId))\n # if not rules:\n # rules.extend(self.findDepravedMatchingRules(nodeId))\n # # Should rule got here!.\n # assert rules\n\n return rules, self.mapNodeDependentSites[nodeId]",
"def get_children(self, current_rule) -> Set:\n dependencies = set()\n rules = self.rules[current_rule]\n for rule in rules:\n for r in rule:\n dependencies.add(r)\n return dependencies",
"def get_dependency_graph(self):\n return self.graph",
"def depends_on(self, node):\n return sorted(self.__edge_map[node], key=node_key)",
"def dep_tree(self, root):\n \n graph = {}\n for key,extract in self.extracts.items():\n graph[key] = set(extract.get('depends',[]))\n \n def _recurse(node):\n l = set([node])\n for n in graph[node]:\n l = l | _recurse(n)\n \n return l\n \n return _recurse(root)",
"def get_subgraphs(graph):\n nodes_powerset = get_nodes_combinations(graph)\n #print(\"Doing\")\n #draw_graph(graph)\n subgraphs = []\n for nodes in nodes_powerset:\n subg = graph.subgraph(nodes)\n nodes = subg.nodes(data=True)\n if nx.is_weakly_connected(subg):\n subgraphs.append(subg)\n return subgraphs",
"def deps_for(nodes, key):\n\n def _deps(key, path):\n if key not in nodes:\n return [key]\n\n if key in path:\n msg = \"Cycle detected between {} and {}\".format(\n path[0], path[-1])\n raise GraphError(msg)\n\n deps = nodes[key][\"required\"]\n trans = [_deps(dep, path + [key]) for dep in deps]\n return set(util.concat(deps, *trans))\n\n return _deps(key, [])",
"def find_dependencies(root):\n \n symbol_table = create_symbol_table(root)\n\n names = []\n #Set the depth of the root node\n set_depth(root, 0)\n #Stack of nodes to visit\n stack = Stack(root)\n \n #List of (src, dest) of dependencies\n dependency_table = DTable(symbol_table=symbol_table)\n\n for node, children, ntype in stack:\n \n stack.check_and_push_scope()\n\n #A Name is being loaded, therefore \n if ntype == \"Name\" and is_load(children):\n \"\"\"\n \"\"\"\n dependency_table.append( (stack.scopes, node))\n \n elif ntype == \"Assign\":\n #TODO need to add assignments and then revoke them\n #for child in children:\n #print children\n pass\n\n \n elif ntype == \"Attribute\":\n #TODO: attribute chains can be arbitrarily long\n #dep_dest = \"{}.{}\".format(node.value.id, node.attr)\n #print \"{} => {}\".format(scopes_to_str(scopes), dep_dest)\n\n #TODO: Can't just do dependency_table.append( (scopes, node))\n #since the unique_id function won't match the create the dep string like \n #{node.value.id}.{node.attr}.\n #Either generalize unique_id or something else.\n \n #Don't add children\n continue\n \n set_lineno(node, children)\n #Add children to stack\n #This musn't always be performed\n for child in children[::-1]:\n set_depth(child, node.depth + 1)\n stack.append(child)\n\n print \"dependency table is \"\n print dependency_table",
"def topological_sort(self, graph=None):\n if graph is None:\n graph = self.graph\n\n in_degree = {}\n for u in graph:\n in_degree[u] = 0\n\n for u in graph:\n for v in graph[u]:\n in_degree[v] += 1\n\n queue = deque()\n for u in in_degree:\n if in_degree[u] == 0:\n queue.appendleft(u)\n\n l = []\n while queue:\n u = queue.pop()\n l.append(u)\n for v in graph[u]:\n in_degree[v] -= 1\n if in_degree[v] == 0:\n queue.appendleft(v)\n\n if len(l) == len(graph):\n return l\n else:\n raise ValueError(\"graph is not acyclic\")",
"def path(g): #g: graph\n marked = set()\n nodes = set(g.nodes) \n output = list()\n def recursive(g):\n for i in nodes.copy():\n d = dependents(g,i)\n if (not d) or all(dd in marked for dd in d):\n output.append((i,g.nodes[i]['word']))\n marked.add(i)\n nodes.remove(i)\n if nodes==set([0]):\n break\n recursive(g)\n break\n recursive(g)\n return output",
"def findRecontructMatchingRules(self, nodeId):\n tokens = self.tree.node(nodeId)\n assert len(tokens) > 0\n if len(tokens) == 1:\n return []\n nodeTag = self.getTagOfNode(nodeId)\n rc = Reconstructor(self.ruletable, self.model,\n self.sense, tokens, nodeTag)\n rules = rc.parse()\n if rules:\n self.recordDependentSitesForNode(nodeId,[-t for t in tokens if t < 0])\n return rules",
"def find_nodes_by_rule(root_node, select):\n if select(root_node): # pragma: no branch\n yield root_node\n for child in root_node.children:\n yield from find_nodes_by_rule(child, select)",
"def _dag_dependents(db: Redis[bytes], dag_of: hash_t, op_from: hash_t) -> set[hash_t]:\n return __set_as_hashes(\n db, join(DAG_OPERATIONS, dag_of), join(OPERATIONS, op_from, \"children\")\n )",
"def get_all_dependencies_for_task(task):\n from pybuilder.reactor import Reactor\n task_name = task.__name__\n execution_manager = Reactor.current_instance().execution_manager\n task_and_all_dependencies = execution_manager.collect_all_transitive_tasks([task_name])\n return [dependency for dependency in task_and_all_dependencies if dependency.name != task_name]",
"def findDepravedMatchingRules(self, nodeId):\n tokens = list(self.tree.node(nodeId))\n assert len(tokens) > 0\n # Build lexical pseudo rule.\n if len(tokens) == 1:\n target = self.sense.tokens[tokens[0]-1][1]\n pseudoRule = self.ruletable.buildPsuedoRule(target, [])\n return [pseudoRule]\n # Build normal pseudo rule.\n terminalPositions = [n for n in range(len(tokens)) if tokens[n] > 0]\n rules = []\n mapPositionToNewNode = {}\n\n # Create new nodes for terminal tokens.\n for pos in terminalPositions:\n tokenId = tokens[pos]\n newNodeId = max(self.tree.nodes) + 1\n mapPositionToNewNode[pos] = newNodeId\n self.tree.nodes[newNodeId] = [tokenId]\n self.tree.mapParent[newNodeId] = nodeId\n self.tree.mapChildren.setdefault(nodeId, []).append(newNodeId)\n self.sense.mapNodeToMainToken[newNodeId] = tokenId\n self.tree.nodes[nodeId][pos] = -newNodeId\n # Build sites for these rules\n sites = []\n for pos in range(len(tokens)):\n if pos in mapPositionToNewNode:\n sites.append(mapPositionToNewNode[pos])\n elif tokens[pos] < 0:\n sites.append(-tokens[pos])\n # Get pseudo rule.\n pseudoRule = self.ruletable.buildPsuedoRule(None, sites)\n self.recordDependentSitesForNode(nodeId, sites)\n return [pseudoRule]",
"def find_joins(table, root, graph, path=None):\n if table == root:\n return path\n if path is None:\n path = []\n candidates = []\n for parent, columns in graph[table][\"fks\"].items():\n if parent == table:\n continue\n for from_col, to_col, nullable, constraint_name in columns:\n if not nullable:\n found = find_joins(parent, root, graph, path + [(parent, from_col, to_col)])\n if found:\n candidates.append(found)\n candidates.sort(key=len)\n return candidates[0] if candidates else None",
"def compute_dependencies(tables):\n tables = list(tables)\n graph = {}\n def visit_foreign_key(fkey):\n if fkey.use_alter:\n return\n parent_table = fkey.column.table\n if parent_table in tables:\n child_table = fkey.parent.table\n if parent_table is not child_table:\n graph.setdefault(parent_table, []).append(child_table)\n\n for table in tables:\n visitors.traverse(table,\n {'schema_visitor': True},\n {'foreign_key': visit_foreign_key})\n\n graph.setdefault(table, []).extend(table._extra_dependencies)\n\n return graph",
"def get_graph(nodes, edges, deps, sec, containers):\n root = RootResource()\n nodes.append(root)\n # connect accessible nodes to the web\n for node in nodes:\n if node.get_original_type() is not None and \\\n node.get_original_type().is_accessible():\n root.add_child(node)\n\n # Add children for each edge we collected before\n for edge in edges:\n parents = get_resource(nodes, edge['from'])\n children = get_resource(nodes, edge['to'])\n for node in parents:\n for child in children:\n node.add_child(child)\n\n # Same with dependencies\n for dep in deps:\n parents = get_resource(nodes, dep['from'])\n children = get_resource(nodes, dep['to'])\n for node in parents:\n for child in children:\n if node.get_origin() == TARGET and child.get_origin() == TARGET:\n node.add_dependency(child)\n\n # Ensure containers only has Resource as keys and in values by creating more\n # entries if necessary.\n expanded_containers = {}\n for container in containers:\n for c in get_resource(nodes, container):\n if not c in expanded_containers:\n expanded_containers[c] = []\n for x in containers[container]:\n expanded_containers[c].extend(get_resource(nodes, x))\n\n # Same with sec\n expanded_sec = {}\n for security in sec:\n for s in get_resource(nodes, security):\n if not s in expanded_sec:\n expanded_sec[s] = {'from': [], 'to': []}\n for x in sec[security]['from']:\n expanded_sec[s]['from'].extend(get_resource(nodes, x))\n for x in sec[security]['to']:\n expanded_sec[s]['to'].extend(get_resource(nodes, x))\n\n nodes = move_security_nodes(nodes, expanded_sec, expanded_containers)\n nodes = move_container_nodes(nodes, expanded_containers)\n return (nodes, root)",
"def topological_sort(graph, rootKey = None):\n\n\t# Reset's the attribute values of all Nodes in graph to their initialization values.\n\t# Importantly, resets Node.searchStatus to \"undiscovered\" and Node.parent to None.\n\tgraph.reset()\n\n\ttopologicalKeyList = []\n\n\t# time is declared inside a function and so must be made global.\n\tglobal time; time = 0\n\n\t# If a starting root is specified, begin there.\n\tif rootKey is not None:\n\t\ttopological_sort_visit(graph, rootKey, topologicalKeyList)\n\n\t# Visit each undiscovered Node.\n\n\t# The keys are ordered here to enforce an easily predictable traversal.\n\t# This is not necessary and reduces efficiency, but makes testing very straightforward. \n\t# For the purposes of this program this loss in efficiency is acceptable.\n\torderedKeys = list(graph.adjacencyMap.keys()); orderedKeys.sort()\n\tfor key in orderedKeys:\n\t\tif graph.vertexMap[key].searchStatus == \"undiscovered\":\n\t\t\ttopological_sort_visit(graph, key, topologicalKeyList)\n\n\t# Explored and created a forest within graph.\n\treturn topologicalKeyList",
"def compute_graph_levels(deps, uses):\n levels = {k: None for k in deps}\n\n def compute_level(var):\n if not deps[var]:\n return 0\n \n for d in deps[var]:\n if levels[d] is None:\n return None\n\n return max([levels[dep] for dep in deps[var]]) + 1\n\n dirty = set(deps)\n\n while dirty:\n new_dirty = set()\n \n for var in dirty:\n new_level = compute_level(var)\n if new_level != levels[var]:\n levels[var] = new_level\n for use in uses[var]:\n new_dirty.add(use)\n\n dirty = new_dirty\n\n cycle = set(k for k in levels if levels[k] is None)\n if cycle:\n raise Exception(\"Cyclic variable dependencies detected: %s\" % sorted(cycle))\n\n result = {l: [] for l in set(levels.values())}\n for dep, level in levels.items():\n result[level].append(dep)\n return result",
"def find_dependent_tables(tables, graph=None):\n if graph is None:\n graph = _pokedex_graph\n tables = list(tables)\n dependents = set()\n def add_dependents_of(table):\n for dependent_table in graph.get(table, []):\n if dependent_table not in dependents:\n dependents.add(dependent_table)\n add_dependents_of(dependent_table)\n\n for table in tables:\n add_dependents_of(table)\n\n dependents -= set(tables)\n\n return dependents",
"def is_dependency_subgraph(graph, subgraph_candidate,\n node_attrib='label', edge_attrib='label'):\n if len(subgraph_candidate) > 1:\n if nx.is_weakly_connected(subgraph_candidate):\n if includes_all_subgraph_rules(graph, subgraph_candidate,\n node_attrib=node_attrib,\n edge_attrib=edge_attrib):\n return True\n return False",
"def topological_sort(self):\n in_degree = {}\n for node in self.graph:\n in_degree[node] = 0\n\n for from_node in self.graph:\n for to_node in self.graph[from_node]:\n in_degree[to_node] += 1\n\n queue = deque()\n for node in in_degree:\n if in_degree[node] == 0:\n queue.appendleft(node)\n\n sorted_nodes = []\n while queue:\n independent_node = queue.pop()\n sorted_nodes.append(independent_node)\n for next_node in self.graph[independent_node]:\n in_degree[next_node] -= 1\n if in_degree[next_node] == 0:\n queue.appendleft(next_node)\n\n if len(sorted_nodes) == len(self.graph):\n return sorted_nodes\n else:\n raise ValueError('graph is not acyclic')",
"def graph_nodes(dependencies):\n\n return set.union(set(dependencies), *list(dependencies.values()))"
]
| [
"0.64498824",
"0.6156136",
"0.6010759",
"0.58395386",
"0.5650117",
"0.5455874",
"0.5448717",
"0.54439825",
"0.54088295",
"0.53029513",
"0.5287118",
"0.5284669",
"0.5279773",
"0.5236452",
"0.52084494",
"0.5171899",
"0.51481843",
"0.5142804",
"0.5123818",
"0.5096218",
"0.5073595",
"0.5047045",
"0.5027753",
"0.5020375",
"0.5002462",
"0.49855372",
"0.49812335",
"0.49507782",
"0.4944049",
"0.49258998"
]
| 0.8420895 | 0 |
Returns True iff a graph contains all dependency rules of the given subgraph candidate. | def includes_all_subgraph_rules(graph, subgraph_candidate,
subgraph_root_node=None,
node_attrib='label', edge_attrib='label'):
graph_rules = get_dependency_rules(graph, node_attrib=node_attrib,
edge_attrib=edge_attrib)
subgraph_rules = get_dependency_rules(subgraph_candidate,
root_node=subgraph_root_node,
node_attrib=node_attrib,
edge_attrib=edge_attrib)
return all(sg_rule in graph_rules for sg_rule in subgraph_rules) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_dependency_subgraph(graph, subgraph_candidate,\n node_attrib='label', edge_attrib='label'):\n if len(subgraph_candidate) > 1:\n if nx.is_weakly_connected(subgraph_candidate):\n if includes_all_subgraph_rules(graph, subgraph_candidate,\n node_attrib=node_attrib,\n edge_attrib=edge_attrib):\n return True\n return False",
"def is_all_visited(self):\n cond = [node.visited if node and node.belongs and node.valid else True for node in self.nodes.flatten()]\n return all(cond)",
"def _graph_contains_subgraph_edges(graph, subgraph, subgraph_edges):\n for e in subgraph_edges:\n graph_v_id_source = subgraph.get_vertex(e.source_id).get(MAPPED_V_ID)\n graph_v_id_target = subgraph.get_vertex(e.target_id).get(MAPPED_V_ID)\n if not graph_v_id_source or not graph_v_id_target:\n raise VitrageAlgorithmError('Cant get vertex for edge %s' % e)\n found_graph_edge = graph.get_edge(graph_v_id_source,\n graph_v_id_target,\n e.label)\n\n if not found_graph_edge and e.get(NEG_CONDITION):\n continue\n\n if not found_graph_edge or not check_filter(found_graph_edge, e,\n NEG_CONDITION):\n return False\n return True",
"def is_subgraph_of(self, other):\n # If it is already recognized that it is a subgraph this procedure can be skipped.\n if other in self.__supergraph :\n return True\n \n if type(self)!=type(other):\n raise TypeError(\"Only works between graphs.\")\n elif other.return_num_vertices() == 0:\n return False\n elif self.return_num_vertices() == 0:\n return True\n names_to_check = self.return_names()\n # Checks if the vertices are a subset\n if not set(names_to_check).issubset(set(other.return_names())):\n return False\n \n # Traverses each node and checks if the adjacencies build a subset.\n # To do so, the node indices must be replaced by node names.\n # This is laborious, but only needs to be done once.\n for name in names_to_check:\n selflist = set(map(lambda x: (self.return_vertexName(x[0]),x[1]), self.return_adjacencies(self.return_vertexIndex(name))))\n otherlist = set(map(lambda x: (other.return_vertexName(x[0]),x[1]), other.return_adjacencies(other.return_vertexIndex(name))))\n if not selflist.issubset(otherlist):\n return False\n self.__supergraph.append(other)\n \n return True",
"def are_concatenate_on_graph(self, subgraph) -> bool:\n self.visit(subgraph)\n return self.on_graph",
"def graph_issubset(graph1, graph2):\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(graph1, graph2)\n\n return graph1.nodes.issubset(graph2.nodes) and graph1.edges.issubset(graph2.edges)",
"def is_acyclic(graph):\n visited = []\n dfs_seq = DFSIterator(graph)\n\n for node in dfs_seq:\n visited.insert(0, node)\n node_neighbors = graph.get_neighbors(node)\n \n for neighbor in node_neighbors:\n if neighbor in visited:\n return False\n\n return True",
"def is_reachable(graph, root, destination):\n \n if root in graph.nodes and destination in graph.nodes:\n connected_path = dfs(graph, root)\n return destination in connected_path\n else:\n logger.error('Root or destination nodes not in graph')",
"def has_edges(self):\n\n return len(self._edges) > 0",
"def valid(self):\n\t\tfor k, v in self.rules.items():\n\t\t\tfor i in v:\n\t\t\t\tif any([self.valid_rule_1(i), self.valid_rule_2(i), self.valid_rule_3(k, i)]):\n\t\t\t\t\t# print(\"Got a pass\")\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\t# print(\"Got a fail\")\n\t\t\t\t\treturn False\n\t\t# print(\"CORRECT CFG\")\n\t\treturn True",
"def has_groups(self, resolvables, all=True):\n total_checks = 0\n\n for group in resolvables:\n if self.has_group(group):\n total_checks += 1\n\n if not all:\n return True\n\n return True if all and total_checks == len(resolvables) else False",
"def has_cycles(graph):\n path = set()\n\n def visit(node):\n path.add(node)\n for neighbour in graph.edges[node]:\n if neighbour in path or visit(neighbour):\n return True\n path.remove(node)\n return False\n\n return any(visit(node) for node in graph.nodes)",
"def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n graph = defaultdict(list)\n visited = [0 for _ in range(numCourses)]\n for a, b in prerequisites:\n graph[a].append(b)\n # 0 - Not visited, -1 - Currently being visited, 1 - Visited\n for course in range(numCourses):\n if visited[course] == 1:\n continue\n if self.dfs(course, graph, visited):\n return False\n return True",
"def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True",
"def contains_isomorphic_subgraph_bruteforce(self, H):\n if not isinstance(H, sage.graphs.graph.Graph):\n raise ValueError(\"H is not a graph.\")\n try:\n self.sage_graph().subgraph_search_iterator(H)\n return True\n except StopIteration:\n return False",
"def ok(self, solution):\n if self.constraints is not None:\n for constraint in self.constraints:\n if not constraint(solution):\n return False\n return True",
"def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True",
"def allConstraintsSatisfied(self):\n # loop through all of the constraints\n for constraint in self.constraints:\n # if any of the constraints are not satisfied, then return False\n if (not constraint.satisfied(constraint.tail.value, constraint.head.value)):\n return False\n # no violations, so return true\n return True",
"def bfs_is_connected(self):\n q = Queue.Queue()\n origins = [self.vertices()[0]]\n traveled = set(origins)\n while origins:\n for o in origins:\n for child in self.out_vertices(o):\n if child not in traveled:\n q.put(child)\n traveled.add(child)\n\n origins = []\n while not q.empty():\n origins.append(q.get())\n if len(traveled) == self.order():\n return True\n return False",
"def __satisfies_necessary_and_sufficient_conditions(g):\n # Condition 0: at least 1 Edge\n if g.get_E() == 0:\n return False\n # Condition 1: indegree(v) == outdegree(v) for every vertex\n for v in range(g.get_V()):\n if g.outdegree() != g.indegree(v):\n return False\n # Condition 2: graph is connected, ignoring isolated vertices\n h = Graph(g.get_V())\n for v in range(g.get_V()):\n for w in g.adj_vertices(v):\n h.add_edge(v, w)\n # check that all non-isolated vertices are connected\n s = DirectedEulerianCycle.__non_isolated_vertex(g)\n bfs = BreadthFirstPaths(h, s)\n for v in range(g.get_V()):\n if h.degree(v) > 0 and not bfs.has_path_to(v):\n return False\n return True",
"def contains(self, g, strict=True):\n if not isinstance(g, Permutation):\n return False\n if g.size != self.degree:\n if strict:\n return False\n g = Permutation(g, size=self.degree)\n if g in self.generators:\n return True\n return bool(self.coset_factor(g.array_form, True))",
"def complete(self):\n return all((constraint.satisfied() for constraint in self.constraints))",
"def graph_is_connected(node_count, edges):\n\n disjoint_set = disjoint.DisjointSet(node_count + 1)\n\n for a, b in edges:\n disjoint_set.union(a, b)\n\n # Check if all nodes are part of the same set\n\n root = disjoint_set.root(1)\n\n for i in range(2, node_count + 1):\n if disjoint_set.root(i) != root:\n return False\n\n return True",
"def consistent(self,assignment):\n return all(con.holds(assignment)\n for con in self.constraints\n if all(v in assignment for v in con.scope))",
"def can_be_applied(self, sdfg: SDFG, subgraph: SubgraphView) -> bool:\n # get graph\n graph = subgraph.graph\n for node in subgraph.nodes():\n if node not in graph.nodes():\n return False\n\n # next, get all the maps\n map_entries = helpers.get_outermost_scope_maps(sdfg, graph, subgraph)\n map_exits = [graph.exit_node(map_entry) for map_entry in map_entries]\n maps = [map_entry.map for map_entry in map_entries]\n\n # 1. basic checks:\n # 1.1 we need to have at least two maps\n if len(maps) <= 1:\n return False\n\n # 1.2 check whether all maps are the same\n base_map = maps[0]\n for map in maps:\n if map.get_param_num() != base_map.get_param_num():\n return False\n if not all([p1 == p2 for (p1, p2) in zip(map.params, base_map.params)]):\n return False\n if not map.range == base_map.range:\n return False\n\n # 1.3 check whether all map entries have the same schedule\n schedule = map_entries[0].schedule\n if not all([entry.schedule == schedule for entry in map_entries]):\n return False\n\n # 2. check intermediate feasiblility\n # see map_fusion.py for similar checks\n # with the restrictions below being more relaxed\n\n # 2.1 do some preparation work first:\n # calculate node topology (see apply for definition)\n try:\n node_config = SubgraphFusion.get_adjacent_nodes(sdfg, graph, map_entries)\n except NotImplementedError:\n return False\n in_nodes, intermediate_nodes, out_nodes = node_config\n\n # 2.2 topological feasibility:\n if not SubgraphFusion.check_topo_feasibility(sdfg, graph, map_entries, intermediate_nodes, out_nodes):\n return False\n\n # 2.3 memlet feasibility\n # For each intermediate node, look at whether inner adjacent\n # memlets of the exiting map cover inner adjacent memlets\n # of the next entering map.\n # We also check for any WCRs on the fly.\n try:\n invariant_dimensions = self.determine_invariant_dimensions(sdfg, graph, intermediate_nodes, map_entries,\n map_exits)\n except NotImplementedError:\n return False\n\n for node in intermediate_nodes:\n upper_subsets = set()\n lower_subsets = set()\n # First, determine which dimensions of the memlet ranges\n # change with the map, we do not need to care about the other dimensions.\n dims_to_discard = invariant_dimensions[node.data]\n # find upper_subsets\n for in_edge in graph.in_edges(node):\n # first check for WCRs\n if in_edge.data.wcr:\n # check whether the WCR is actually produced at\n # this edge or further up in the memlet path. 
If not,\n # we can still fuse!\n in_in_edge = graph.memlet_path(in_edge)[-2]\n subset_params = set([str(s) for s in in_in_edge.data.subset.free_symbols])\n if any([p not in subset_params for p in in_edge.src.map.params]):\n return False\n if in_edge.src in map_exits:\n for iedge in graph.in_edges(in_edge.src):\n if iedge.dst_conn[2:] == in_edge.src_conn[3:]:\n subset_to_add = dcpy(iedge.data.subset if iedge.data.data ==\n node.data else iedge.data.other_subset)\n\n subset_to_add.pop(dims_to_discard)\n upper_subsets.add(subset_to_add)\n else:\n warnings.warn(\"SubgraphFusion::Nodes between two maps to be\"\n \"fused with incoming edges\"\n \"from outside the maps are not\"\n \"allowed yet.\")\n return False\n\n # find lower_subsets\n for out_edge in graph.out_edges(node):\n if out_edge.dst in map_entries:\n for oedge in graph.out_edges(out_edge.dst):\n if oedge.src_conn and oedge.src_conn[3:] == out_edge.dst_conn[2:]:\n subset_to_add = dcpy(oedge.data.subset if oedge.data.data ==\n node.data else oedge.data.other_subset)\n subset_to_add.pop(dims_to_discard)\n lower_subsets.add(subset_to_add)\n\n # We assume that upper_subsets are contiguous\n # Check for this.\n try:\n contiguous_upper = find_contiguous_subsets(upper_subsets)\n if len(contiguous_upper) > 1:\n return False\n except TypeError:\n warnings.warn('SubgraphFusion::Could not determine whether subset is continuous.'\n 'Exiting Check with False.')\n return False\n\n # now take union of upper subsets\n upper_iter = iter(upper_subsets)\n union_upper = next(upper_iter)\n for subs in upper_iter:\n union_upper = subsets.union(union_upper, subs)\n if not union_upper:\n # something went wrong using union -- we'd rather abort\n return False\n\n # finally check coverage\n # every lower subset must be completely covered by union_upper\n for lower_subset in lower_subsets:\n if not union_upper.covers(lower_subset):\n return False\n\n # 2.4 Check for WCRs in out nodes: If there is one, the corresponding\n # data must never be accessed anywhere else\n intermediate_data = set([n.data for n in intermediate_nodes])\n in_data = set([n.data for n in in_nodes if isinstance(n, nodes.AccessNode)])\n out_data = set([n.data for n in out_nodes if isinstance(n, nodes.AccessNode)])\n\n view_nodes = set()\n for node in chain(in_nodes, out_nodes, intermediate_nodes):\n if isinstance(node, nodes.AccessNode):\n is_view = isinstance(sdfg.data(node.data), dace.data.View)\n for edge in chain(graph.in_edges(node), graph.out_edges(node)):\n for e in graph.memlet_tree(edge):\n if isinstance(e.dst, nodes.AccessNode) and (is_view or isinstance(\n sdfg.data(e.dst.data), dace.data.View)):\n view_nodes.add(e.dst)\n if isinstance(e.src, nodes.AccessNode) and (is_view or isinstance(\n sdfg.data(e.src.data), dace.data.View)):\n view_nodes.add(e.src)\n\n view_data = set([n.data for n in view_nodes])\n\n for out_node in out_nodes:\n for in_edge in graph.in_edges(out_node):\n if in_edge.src in map_exits and in_edge.data.wcr:\n if in_edge.data.data in in_data or in_edge.data.data in intermediate_data or in_edge.data.data in view_data:\n return False\n\n # Check compressibility for each intermediate node -- this is needed in the following checks\n is_compressible = SubgraphFusion.determine_compressible_nodes(sdfg, graph, intermediate_nodes, map_entries,\n map_exits)\n\n # 2.5 Intermediate Arrays must not connect to ArrayViews\n for n in intermediate_nodes:\n if is_compressible[n.data]:\n for out_edge in graph.out_edges(n):\n for e in graph.memlet_tree(out_edge):\n if 
isinstance(e.dst, nodes.AccessNode) and isinstance(sdfg.data(e.dst.data), dace.data.View):\n warnings.warn(\"SubgraphFusion::View Node Compression not supported!\")\n return False\n for in_edge in graph.in_edges(n):\n for e in graph.memlet_tree(in_edge):\n if isinstance(e.src, nodes.AccessNode) and isinstance(sdfg.data(e.src.data), dace.data.View):\n warnings.warn(\"SubgraphFusion::View Node Compression not supported\")\n return False\n\n # 2.6 Check for disjoint accesses for arrays that cannot be compressed\n if self.disjoint_subsets == True:\n container_dict = defaultdict(list)\n for node in chain(in_nodes, intermediate_nodes, out_nodes):\n if isinstance(node, nodes.AccessNode):\n container_dict[node.data].append(node)\n\n # Check for read/write dependencies between input and output nodes\n outputs = set(n.data for n in out_nodes)\n from dace.transformation.interstate import StateFusion\n for node in in_nodes:\n if isinstance(node, nodes.AccessNode) and node.data in outputs:\n matching_outputs = [n for n in out_nodes if n.data == node.data]\n # Overall ranges overlap: potential data race\n if StateFusion.memlets_intersect(graph, [node], True, graph, matching_outputs, False):\n # Check memlet leaves in more detail\n in_leaves = [l for e in graph.out_edges(node) for l in graph.memlet_tree(e).leaves()]\n out_leaves = [\n l for n in matching_outputs for e in graph.in_edges(n)\n for l in graph.memlet_tree(e).leaves()\n ]\n # All-pairs check. If memlets are equal then there are no races.\n # If they are not, and we cannot know whether they intersect or they do, we do not match.\n for ea in in_leaves:\n for eb in out_leaves:\n if ea.data.src_subset == eb.data.dst_subset: # Equal - no data race\n continue\n return False # Otherwise - potential data race\n\n for (node_data, compressible) in is_compressible.items():\n # we only care about disjoint subsets...\n # 1. if the array is not compressible\n if not compressible:\n # 2. 
if there are multiple containers appearing pointing to the same data\n if len(container_dict[node_data]) > 1:\n # retrieve map inner access sets of all access nodes appearing within the subgraph\n\n access_set = None\n for node in container_dict[node_data]:\n for e in graph.out_edges(node):\n if e.dst in map_entries:\n # get corresponding inner memlet and join its subset to our access set\n for oe in graph.out_edges(e.dst):\n if oe.src_conn[3:] == e.dst_conn[2:]:\n current_subset = dcpy(oe.data.subset)\n current_subset.pop(invariant_dimensions[node_data])\n\n access_set = subsets.union(access_set, current_subset)\n if access_set is None:\n warnings.warn(\"SubgraphFusion::Disjoint Access found\")\n return False\n for e in graph.in_edges(node):\n if e.src in map_exits:\n for ie in graph.in_edges(e.src):\n # get corresponding inner memlet and join its subset to our access set\n if ie.dst_conn[2:] == e.src_conn[3:]:\n current_subset = dcpy(ie.data.subset)\n current_subset.pop(invariant_dimensions[node_data])\n\n access_set = subsets.union(access_set, current_subset)\n if access_set is None:\n warnings.warn(\"SubgraphFusion::Disjoint Access found\")\n return False\n\n # compare iteration space i_d and i_d-1 in each dimension,\n # where i_d is the iteration variable of the respective dimension\n # if there is any intersection in any dimension, return False\n subset_plus = dcpy(access_set)\n subset_minus = dcpy(access_set)\n repl_dict = {\n symbolic.pystr_to_symbolic(f'{param}'): symbolic.pystr_to_symbolic(f'{param}-1')\n for param in map_entries[0].params\n } # e.g., ['i' -> 'i-1']\n subset_minus.replace(repl_dict)\n\n for (rng, orng) in zip(subset_plus, subset_minus):\n rng_1dim = subsets.Range((rng, ))\n orng_1dim = subsets.Range((orng, ))\n try:\n intersection = rng_1dim.intersects(orng_1dim)\n except TypeError:\n return False\n if intersection is None or intersection == True:\n warnings.warn(\"SubgraphFusion::Disjoint Accesses found!\")\n return False\n\n return True",
"def __bool__(self):\n for root, products in self.rel_paths():\n if products:\n return True\n return False",
"def isAcyclic(self, adjacencyList):\n\n def cyclic(fNode, visited, stack):\n if fNode not in visited:\n visited.add(fNode)\n assert fNode not in stack\n stack.append(fNode)\n for tNode in adjacencyList[fNode]:\n if cyclic(tNode, visited, stack):\n return True\n assert stack.pop() == fNode\n return fNode in stack\n\n visited = set()\n for i in range(len(adjacencyList)):\n if cyclic(i, visited, []):\n return False\n return True",
"def contains_several_vertices(self, currentState):\n\t\treturn True if sum(currentState) > 3 else False",
"def has_cycle(graph):\n ds = DisjointSet()\n\n # creates a set of all graph nodes\n node_set = set()\n for edge in graph:\n node_set.add(edge.node1)\n node_set.add(edge.node2)\n\n for item in node_set:\n ds.make_set(item)\n\n for edge in graph:\n same_set = ds.union(edge.node1, edge.node2)\n if same_set:\n return True\n\n return False",
"def isSubRelation(self, rhs):\n return set(self.iteritems()).issubset(rhs.iteritems())"
]
| [
"0.7680446",
"0.64932567",
"0.616087",
"0.59788954",
"0.59330606",
"0.5835182",
"0.5783123",
"0.5766517",
"0.5704794",
"0.56961316",
"0.5679903",
"0.5674052",
"0.5671569",
"0.56623936",
"0.5658264",
"0.5634861",
"0.5614241",
"0.56140757",
"0.5604958",
"0.55928236",
"0.55737716",
"0.5556866",
"0.55477816",
"0.5521651",
"0.55041045",
"0.54778314",
"0.5477597",
"0.5469599",
"0.546506",
"0.54568434"
]
| 0.7791654 | 0 |
Returns True if the graph contains all of the subgraph candidate's dependency rules. The subgraph must also be (weakly) connected and contain at least two nodes. | def is_dependency_subgraph(graph, subgraph_candidate,
node_attrib='label', edge_attrib='label'):
if len(subgraph_candidate) > 1:
if nx.is_weakly_connected(subgraph_candidate):
if includes_all_subgraph_rules(graph, subgraph_candidate,
node_attrib=node_attrib,
edge_attrib=edge_attrib):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def includes_all_subgraph_rules(graph, subgraph_candidate,\n subgraph_root_node=None,\n node_attrib='label', edge_attrib='label'):\n graph_rules = get_dependency_rules(graph, node_attrib=node_attrib,\n edge_attrib=edge_attrib)\n subgraph_rules = get_dependency_rules(subgraph_candidate,\n root_node=subgraph_root_node,\n node_attrib=node_attrib,\n edge_attrib=edge_attrib)\n return all(sg_rule in graph_rules for sg_rule in subgraph_rules)",
"def is_all_visited(self):\n cond = [node.visited if node and node.belongs and node.valid else True for node in self.nodes.flatten()]\n return all(cond)",
"def are_concatenate_on_graph(self, subgraph) -> bool:\n self.visit(subgraph)\n return self.on_graph",
"def is_subgraph_of(self, other):\n # If it is already recognized that it is a subgraph this procedure can be skipped.\n if other in self.__supergraph :\n return True\n \n if type(self)!=type(other):\n raise TypeError(\"Only works between graphs.\")\n elif other.return_num_vertices() == 0:\n return False\n elif self.return_num_vertices() == 0:\n return True\n names_to_check = self.return_names()\n # Checks if the vertices are a subset\n if not set(names_to_check).issubset(set(other.return_names())):\n return False\n \n # Traverses each node and checks if the adjacencies build a subset.\n # To do so, the node indices must be replaced by node names.\n # This is laborious, but only needs to be done once.\n for name in names_to_check:\n selflist = set(map(lambda x: (self.return_vertexName(x[0]),x[1]), self.return_adjacencies(self.return_vertexIndex(name))))\n otherlist = set(map(lambda x: (other.return_vertexName(x[0]),x[1]), other.return_adjacencies(other.return_vertexIndex(name))))\n if not selflist.issubset(otherlist):\n return False\n self.__supergraph.append(other)\n \n return True",
"def __has_multiple_edges(self):\n return \\\n len(\n list(\n [\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise",
"def complete(self):\n return all((constraint.satisfied() for constraint in self.constraints))",
"def allConstraintsSatisfied(self):\n # loop through all of the constraints\n for constraint in self.constraints:\n # if any of the constraints are not satisfied, then return False\n if (not constraint.satisfied(constraint.tail.value, constraint.head.value)):\n return False\n # no violations, so return true\n return True",
"def _graph_contains_subgraph_edges(graph, subgraph, subgraph_edges):\n for e in subgraph_edges:\n graph_v_id_source = subgraph.get_vertex(e.source_id).get(MAPPED_V_ID)\n graph_v_id_target = subgraph.get_vertex(e.target_id).get(MAPPED_V_ID)\n if not graph_v_id_source or not graph_v_id_target:\n raise VitrageAlgorithmError('Cant get vertex for edge %s' % e)\n found_graph_edge = graph.get_edge(graph_v_id_source,\n graph_v_id_target,\n e.label)\n\n if not found_graph_edge and e.get(NEG_CONDITION):\n continue\n\n if not found_graph_edge or not check_filter(found_graph_edge, e,\n NEG_CONDITION):\n return False\n return True",
"def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True",
"def has_edges(self):\n\n return len(self._edges) > 0",
"def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True",
"def contains_several_vertices(self, currentState):\n\t\treturn True if sum(currentState) > 3 else False",
"def graph_issubset(graph1, graph2):\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(graph1, graph2)\n\n return graph1.nodes.issubset(graph2.nodes) and graph1.edges.issubset(graph2.edges)",
"def graph_is_connected(node_count, edges):\n\n disjoint_set = disjoint.DisjointSet(node_count + 1)\n\n for a, b in edges:\n disjoint_set.union(a, b)\n\n # Check if all nodes are part of the same set\n\n root = disjoint_set.root(1)\n\n for i in range(2, node_count + 1):\n if disjoint_set.root(i) != root:\n return False\n\n return True",
"def is_connected(self):\n vs = self.vertices()\n visited = self.bfs(vs[0])\n return len(visited) == len(vs)",
"def bfs_is_connected(self):\n q = Queue.Queue()\n origins = [self.vertices()[0]]\n traveled = set(origins)\n while origins:\n for o in origins:\n for child in self.out_vertices(o):\n if child not in traveled:\n q.put(child)\n traveled.add(child)\n\n origins = []\n while not q.empty():\n origins.append(q.get())\n if len(traveled) == self.order():\n return True\n return False",
"def is_connected(self):\n if self.V < 1:\n raise ValueError(\"empty graph\")\n if self.V < 2:\n return True\n if self.E == 0:\n return False\n cc = self.cc()\n return int(cc.max() == 0)",
"def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n graph = defaultdict(list)\n visited = [0 for _ in range(numCourses)]\n for a, b in prerequisites:\n graph[a].append(b)\n # 0 - Not visited, -1 - Currently being visited, 1 - Visited\n for course in range(numCourses):\n if visited[course] == 1:\n continue\n if self.dfs(course, graph, visited):\n return False\n return True",
"def validate_graph(self) -> bool:\n return True",
"def IsCyclic(self):\n\n visited = [False for i in range(self.NodesCount())]\n \n for idx in range(1, self.NodesCount()+1): \n if not visited[idx-1]: \n if self.IsCyclicRec(idx, visited, -1): \n return True\n return False",
"def __satisfies_necessary_and_sufficient_conditions(g):\n # Condition 0: at least 1 Edge\n if g.get_E() == 0:\n return False\n # Condition 1: indegree(v) == outdegree(v) for every vertex\n for v in range(g.get_V()):\n if g.outdegree() != g.indegree(v):\n return False\n # Condition 2: graph is connected, ignoring isolated vertices\n h = Graph(g.get_V())\n for v in range(g.get_V()):\n for w in g.adj_vertices(v):\n h.add_edge(v, w)\n # check that all non-isolated vertices are connected\n s = DirectedEulerianCycle.__non_isolated_vertex(g)\n bfs = BreadthFirstPaths(h, s)\n for v in range(g.get_V()):\n if h.degree(v) > 0 and not bfs.has_path_to(v):\n return False\n return True",
"def is_acyclic(graph):\n visited = []\n dfs_seq = DFSIterator(graph)\n\n for node in dfs_seq:\n visited.insert(0, node)\n node_neighbors = graph.get_neighbors(node)\n \n for neighbor in node_neighbors:\n if neighbor in visited:\n return False\n\n return True",
"def cycleCheck(*args, all: bool=True, children: bool=True, dag: bool=True, evaluation:\n bool=True, firstCycleOnly: bool=True, firstPlugPerNode: bool=True,\n lastPlugPerNode: bool=True, list: bool=True, listSeparator: AnyStr=\"\", parents:\n bool=True, secondary: bool=True, timeLimit: time=None, q=True, query=True,\n **kwargs)->Union[bool, Any]:\n pass",
"def contains_isomorphic_subgraph_bruteforce(self, H):\n if not isinstance(H, sage.graphs.graph.Graph):\n raise ValueError(\"H is not a graph.\")\n try:\n self.sage_graph().subgraph_search_iterator(H)\n return True\n except StopIteration:\n return False",
"def has_groups(self, resolvables, all=True):\n total_checks = 0\n\n for group in resolvables:\n if self.has_group(group):\n total_checks += 1\n\n if not all:\n return True\n\n return True if all and total_checks == len(resolvables) else False",
"def is_tree_decomp(graph, decomp):\n for x in graph.nodes():\n appear_once = False\n for bag in decomp.nodes():\n if x in bag:\n appear_once = True\n break\n ok_(appear_once)\n\n # Check if each connected pair of nodes are at least once together in a bag\n for (x, y) in graph.edges():\n appear_together = False\n for bag in decomp.nodes():\n if x in bag and y in bag:\n appear_together = True\n break\n ok_(appear_together)\n\n # Check if the nodes associated with vertex v form a connected subset of T\n for v in graph.nodes():\n subset = []\n for bag in decomp.nodes():\n if v in bag:\n subset.append(bag)\n sub_graph = decomp.subgraph(subset)\n ok_(nx.is_connected(sub_graph))",
"def has_cycle(graph):\n ds = DisjointSet()\n\n # creates a set of all graph nodes\n node_set = set()\n for edge in graph:\n node_set.add(edge.node1)\n node_set.add(edge.node2)\n\n for item in node_set:\n ds.make_set(item)\n\n for edge in graph:\n same_set = ds.union(edge.node1, edge.node2)\n if same_set:\n return True\n\n return False",
"def is_clique(G,S): #set of vertices where every pair in the set forms an edge \n for v in S:\n if list(set(S)&set(neighbors(G,v))) != []: #[] <-- empty list\n return False\n \n return True",
"def ok(self, solution):\n if self.constraints is not None:\n for constraint in self.constraints:\n if not constraint(solution):\n return False\n return True",
"def has_multiple_edges(self):\n # Create a list of edge 2-tuples (a, b)\n edge_tuples = [(e['from_id'], e['to_id']) for e in self._edges]\n if len(edge_tuples) > len(set(edge_tuples)): # Do 'real' multiple edges exist?\n return True\n\n # Create a list of edge 2-tuples (a, b) with a <= b\n edge_tuples = [(min(e['from_id'], e['to_id']), max(e['from_id'], e['to_id'])) for e in self._edges]\n edge_tuples_set = set(edge_tuples)\n\n if len(edge_tuples) == 2 * len(edge_tuples_set): # This only happens if for each edge (a, b) also (b, a) exists\n return False\n else:\n # The set kicks out duplicate edges => less edges in the set means there were multiple edges\n return len(edge_tuples) > len(edge_tuples_set)"
]
| [
"0.7630183",
"0.68231225",
"0.64021075",
"0.6321216",
"0.6030239",
"0.60031056",
"0.60005057",
"0.59945226",
"0.5991897",
"0.59907985",
"0.5969818",
"0.59631526",
"0.59560096",
"0.5941007",
"0.5937184",
"0.593042",
"0.5920621",
"0.5900079",
"0.589987",
"0.5840563",
"0.58382285",
"0.58282894",
"0.57971835",
"0.5777965",
"0.57525253",
"0.57407737",
"0.573901",
"0.5728113",
"0.57129425",
"0.5701535"
]
| 0.7640155 | 0 |
naively generate all (dependency parse) subgraphs of a given graph by iterating through all possible node combinations. HIGHLY INEFFICIENT. | def get_dependency_subgraphs(graph, node_attrib='label', edge_attrib='label'):
assert nx.is_directed_acyclic_graph(graph)
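    # Brute force: try every non-empty combination of nodes as a candidate subgraph.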
for n in xrange(graph.number_of_nodes()):
for subnodes in itertools.combinations(graph.nodes(), n+1):
subgraph_candidate = graph.subgraph(subnodes)
if is_dependency_subgraph(graph, subgraph_candidate,
node_attrib=node_attrib,
edge_attrib=edge_attrib):
yield subgraph_candidate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_subgraphs(graph):\n nodes_powerset = get_nodes_combinations(graph)\n #print(\"Doing\")\n #draw_graph(graph)\n subgraphs = []\n for nodes in nodes_powerset:\n subg = graph.subgraph(nodes)\n nodes = subg.nodes(data=True)\n if nx.is_weakly_connected(subg):\n subgraphs.append(subg)\n return subgraphs",
"def get_subgraphs(self):\n if hasattr(self, 'subgraphs') is False:\n self._get_subgraphs()\n for sub in self.subgraphs:\n yield sub",
"def build_drop_fullgraphs(self, do_subgraph=False, graph_lib='pygraphviz'):\n if 'pygraphviz' == graph_lib:\n G = pgv.AGraph(strict=True, directed=True)\n else:\n G = nx.Graph()\n do_subgraph = False\n subgraph_dict = defaultdict(list) # k - node-ip, v - a list of graph nodes\n oid_gnid_dict = dict()\n\n for i, oid in enumerate(self.pg_spec.keys()):\n oid_gnid_dict[oid] = str(i)\n logger.info(\"oid to gid mapping done\")\n\n for dropspec in self.pg_spec.itervalues():\n gid = oid_gnid_dict[dropspec['oid']]\n ip = dropspec['node']\n subgraph_dict[ip].append(gid)\n if (dropspec['type'] == 'app'):\n G.add_node(gid, shape='rect', label='')#, fixedsize=True, hight=.05, width=.05)\n elif (dropspec['type'] == 'plain'): #parallelogram\n G.add_node(gid, shape='circle', label='')#, fixedsize=True, hight=.05, width=.05)\n logger.info(\"Graph nodes added\")\n\n for dropspec in self.pg_spec.itervalues():\n gid = oid_gnid_dict[dropspec['oid']]\n if (dropspec['type'] == 'app'):\n ds_kw = 'outputs' #down stream key word\n elif (dropspec['type'] == 'plain'):\n ds_kw = 'consumers'\n else:\n ds_kw = 'None'\n if (ds_kw in dropspec):\n for doid in dropspec[ds_kw]:\n G.add_edge(gid, oid_gnid_dict[doid])\n logger.info(\"Graph edges added\")\n\n if (do_subgraph):\n for i, subgraph_nodes in enumerate(subgraph_dict.values()):\n # we don't care about the subgraph label or rank\n subgraph = G.add_subgraph(subgraph_nodes, label='%d' % i, name=\"cluster_%d\" % i, rank=\"same\")\n subgraph.graph_attr['rank']='same'\n logger.info(\"Subgraph added\")\n\n return G",
"def strongly_connected_component_subgraphs(G):\n cc=strongly_connected_components(G)\n graph_list=[]\n for c in cc:\n graph_list.append(G.subgraph(c))\n return graph_list",
"def getSubGraphs(self):\n\n self.subGraphs = []\n visited = {}\n queue = deque()\n\n for s in self.nodes:\n\n if s not in visited:\n subGraph = SubGraph()\n self.subGraphs.append(subGraph)\n else:\n continue\n\n queue.append(s)\n\n while len (queue) > 0:\n outDegree = 0\n node = queue.popleft()\n if node in visited:\n continue\n\n for u in node.adj:\n if u not in visited:\n outDegree += 1\n queue.append(u)\n\n\n subGraph.addNode(node, outDegree)\n visited[node] = True",
"def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list",
"def parse_graphs(self, graph_iterator):\n #filter_cache = make_graph_filter_cache() \n for graph in graph_iterator: \n raw_chart = self.parse(graph)\n # The raw chart contains parser operations, need to decode the parse forest from this \n res = td_chart_to_cky_chart(raw_chart)\n yield res",
"def dump_subgraph_for_debug(self):\n\n import pypipegraph2 as ppg\n\n nodes = []\n seen = set()\n edges = []\n counter = [0]\n node_to_counters = {}\n\n def descend(node):\n if node in seen:\n return\n seen.add(node)\n j = self.runner.jobs[node]\n if isinstance(j, ppg.FileInvariant):\n nodes.append(f\"Path('{counter[0]}').write_text('A')\")\n nodes.append(f\"job_{counter[0]} = ppg.FileInvariant('{counter[0]}')\")\n elif isinstance(j, ppg.ParameterInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.ParameterInvariant('{counter[0]}', 55)\"\n )\n elif isinstance(j, ppg.FunctionInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.FunctionInvariant('{counter[0]}', lambda: 55)\"\n )\n elif isinstance(j, ppg.SharedMultiFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.SharedMultiFileGeneratingJob('{counter[0]}', {[x.name for x in j.files]!r}, dummy_smfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.TempFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.TempFileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.FileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.FileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiTempFileGeneratingJob):\n files = [counter[0] + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiTempFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiFileGeneratingJob):\n files = [str(counter[0]) + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.DataLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.DataLoadingJob('{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.AttributeLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.AttributeLoadingJob('{counter[0]}', DummyObject(), 'attr_{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n else:\n raise ValueError(j)\n node_to_counters[node] = counter[0]\n counter[0] += 1\n for parent in self.runner.dag.predecessors(node):\n descend(parent)\n\n def build_edges(node):\n for parent in self.runner.dag.predecessors(node):\n edges.append(\n f\"edges.append(('{node_to_counters[node]}', '{node_to_counters[parent]}'))\"\n )\n build_edges(parent)\n\n descend(self.job_id)\n edges.append(\"edges = []\")\n build_edges(self.job_id)\n edges.extend(\n [\n \"for (a,b) in edges:\",\n \" if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\",\n \" ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\",\n ]\n )\n with open(\"subgraph_debug.py\", \"w\") as op:\n lines = \"\"\"\nclass DummyObject:\n pass\n\ndef dummy_smfg(files, prefix):\n Path(prefix).mkdir(exist_ok=True, parents=True)\n for f in files:\n f.write_text(\"hello\")\n\n\ndef dummy_mfg(files):\n for f in files:\n f.parent.mkdir(exist_ok=True, parents=True)\n f.write_text(\"hello\")\n\ndef dummy_fg(of):\n of.parent.mkdir(exist_ok=True, parents=True)\n of.write_text(\"fg\")\n\n\"\"\".split(\n \"\\n\"\n )\n lines += nodes\n lines += edges\n lines += [\"\", \"ppg.run()\", \"ppg.run\"]\n\n op.write(\"\\n\".join(\" \" + l for l in lines))",
"def find_all_subgraphs(graph, match):\n if not match:\n return []\n\n return _find(graph, match, {}, match)",
"def graphs(n):\n assert n >= 0\n\n # Special cases for small vertex sets\n if n <= 2:\n if n == 0:\n yield []\n return\n if n == 1:\n yield [ [] ]\n return\n if n == 2:\n yield [ [], [] ]\n yield [ [1], [0] ]\n return\n\n # Make generator yielding all possible edges.\n # If a < b < c, then we yield edge (a,b) before (a,c).\n # If b < c < a, then we yield edge (b,a) before (c,a).\n # As a result, we will construct graph representations having sorted\n # adjacency lists, which our graph representation requires.\n alledges = ( (j, i) for i in range(n) for j in range(i) )\n\n # Generate all graphs\n # We unroll the portion of the loop dealing with edges (0,1), (0,2)\n for edges in powerset(itertools.islice(alledges, 2, None)):\n # unrolling for edges (0,1) and (0,2)\n g = [ [] for v in range(n) ]\n for e in edges:\n g[e[0]].append(e[1])\n g[e[1]].append(e[0])\n yield g\n\n # Add edge (0,1)\n g2 = g[:]\n # We can't use .insert below, since we don't want to modify the\n # items in the list we have (shallowly!) copied.\n g2[0] = [1]+g2[0]\n g2[1] = [0]+g2[1]\n yield g2\n\n # Add edge (0,2)\n g3 = g[:]\n g3[0] = [2]+g3[0]\n g3[2] = [0]+g3[2]\n yield g3\n\n # Add edges (0,1) and (0,2)\n g4 = g3[:] # Not copied from g!\n g4[0] = [1]+g4[0]\n g4[1] = [0]+g4[1]\n yield g4",
"def topological_nodes_generator(graph, reverse=...):\n ...",
"def graphs_iso(n):\n assert n >= 0\n for g in unique_iso(graphs(n)):\n yield g",
"def gen_graph(self):",
"def _build_graphs(self):\n g1 = self._build_graph1()\n g2 = self._build_graph2(g1)\n return g1, g2",
"def simple_cycles(g: Graph) -> List[List[Branch]]:\n # Make copy because the graph gets altered during the algorithm\n graph_copy = g.copy()\n branch_map = {}\n copy_result = list()\n\n # Create map to allow returning original branches\n for branch in g.branches:\n branch_map[branch.id] = branch\n\n # Yield every elementary cycle in python graph G exactly once\n # Expects a dictionary mapping from vertices to iterables of vertices\n def _unblock(thisnode, blocked, B):\n stack = set([thisnode])\n while stack:\n node = stack.pop()\n if node in blocked:\n blocked.remove(node)\n stack.update(B[node])\n B[node].clear()\n sccs = [(graph_copy, scc) for scc in\n strongly_connected_components(graph_copy)]\n while sccs:\n current_graph, scc = sccs.pop()\n startnode = scc.pop()\n path = [startnode.id]\n pathBranches = []\n blocked = set()\n closed = set()\n blocked.add(startnode.id)\n B = defaultdict(set)\n stack = [(startnode, list(startnode.outgoing))]\n while stack:\n thisnode, nbrs = stack[-1]\n if nbrs:\n branch = nbrs.pop()\n nextnode = branch.end\n if nextnode.id == startnode.id:\n result = pathBranches[:]\n result.append(branch)\n copy_result.append(result)\n closed.update(path)\n elif nextnode.id not in blocked:\n path.append(nextnode.id)\n pathBranches.append(branch)\n stack.append((nextnode,\n list(nextnode.outgoing)))\n closed.discard(nextnode.id)\n blocked.add(nextnode.id)\n continue\n if not nbrs:\n if thisnode.id in closed:\n _unblock(thisnode.id, blocked, B)\n else:\n for nbr in map(lambda x: x.end,\n thisnode.outgoing):\n if thisnode.id not in B[nbr.id]:\n B[nbr.id].add(thisnode.id)\n stack.pop()\n path.pop()\n if (pathBranches):\n pathBranches.pop()\n startnode.remove()\n subgraph = current_graph.subgraph(set(scc))\n new_scc = strongly_connected_components(subgraph)\n sccs.extend([(subgraph, scc) for scc in new_scc])\n\n for loop in copy_result:\n yield list(map(lambda b: branch_map[b.id], loop))",
"def _build_graph(self):\n pass",
"def allGraphs(date):\n g = getGraph()\n for uri, label, filename in subgraphs(date):\n if not label:\n label = \"(no label provided)\"\n g.parse(filename, format=SUBGRAPH_FORMAT)\n return g",
"def run_tests(g: Graph) -> None:\n print( g.nodes() , \"->\" , ', '.join([f\"{l}\" for l in g.scc()]) , f\"({g.cyclic()})\" )\n for n in g.nodes():\n for m in [m for m in g.nodes() if m != n]:\n p = g.path(n,m)\n if p is not None:\n assert p[0] == n\n assert p[-1] == m\n for i in range(1,len(p)):\n assert g.is_edge(p[i-1], p[i])\n print(\" \", n, \"->\", m, \":\", ' -> '.join([f\"{v}\" for v in p]))",
"def merge_graphs(\n graph: DiGraph,\n ) -> Tuple[list[str], GraphAccess, Generator[Tuple[str, GraphAccess], None, None]]:\n\n # Find merge nodes: all nodes that are marked as merge node -> all children (merge roots) should be merged.\n # This method returns all merge roots as key, with the respective predecessor nodes as value.\n def merge_roots() -> dict[str, set[str]]:\n graph_root = GraphAccess.root_id(graph)\n merge_nodes = [node_id for node_id, data in graph.nodes(data=True) if data.get(\"merge\", False)]\n assert len(merge_nodes) > 0, \"No merge nodes provided in the graph. Mark at least one node with merge=true!\"\n result: dict[str, set[str]] = {}\n for node in merge_nodes:\n # compute the shortest path from root to here and sort out all successors that are also predecessors\n pres: set[str] = reduce(lambda res, p: res | set(p), all_shortest_paths(graph, graph_root, node), set())\n for a in graph.successors(node):\n if a not in pres:\n result[a] = pres\n return result\n\n # Walk the graph from given starting node and return all successors.\n # A successor which is also a predecessor is not followed.\n def sub_graph_nodes(from_node: str, parent_ids: set[str]) -> set[str]:\n to_visit = [from_node]\n visited: set[str] = {from_node}\n\n def successors(node: str) -> list[str]:\n return [a for a in graph.successors(node) if a not in visited and a not in parent_ids]\n\n while to_visit:\n to_visit = reduce(lambda li, node: li + successors(node), to_visit, [])\n visited.update(to_visit)\n return visited\n\n # Create a generator for all given merge roots by:\n # - creating the set of all successors\n # - creating a subgraph which contains all predecessors and all succors\n # - all predecessors are marked as visited\n # - all predecessor edges are marked as visited\n # This way it is possible to have nodes in the graph that will not be touched by the update\n # while edges will be created from successors of the merge node to predecessors of the merge node.\n def merge_sub_graphs(\n root_nodes: dict[str, set[str]], parent_nodes: set[str], parent_edges: set[Tuple[str, str, str]]\n ) -> Generator[Tuple[str, GraphAccess], None, None]:\n all_successors: Set[str] = set()\n for root, predecessors in root_nodes.items():\n successors: set[str] = sub_graph_nodes(root, predecessors)\n # make sure nodes are not \"mixed\" between different merge nodes\n overlap = successors & all_successors\n if overlap:\n raise AttributeError(f\"Nodes are referenced in more than one merge node: {overlap}\")\n all_successors |= successors\n # create subgraph with all successors and all parents, where all parents are already marked as visited\n sub = GraphAccess(graph.subgraph(successors | parent_nodes), root, parent_nodes, parent_edges)\n yield root, sub\n\n roots = merge_roots()\n parents: set[str] = reduce(lambda res, ps: res | ps, roots.values(), set())\n parent_graph = graph.subgraph(parents)\n graphs = merge_sub_graphs(roots, parents, set(parent_graph.edges(data=\"edge_type\")))\n return list(roots.keys()), GraphAccess(parent_graph, GraphAccess.root_id(graph)), graphs",
"def combinations(graph, all_combs, all_costs, all_values, start, prev_cost, prev_value, prev_nodes):\n for ii in range(start, graph.size):\n # combination\n nodes = prev_nodes + [ii]\n all_combs.append(nodes)\n # cost\n cost = prev_cost + graph.node_weights[ii][0]\n all_costs.append(cost)\n # value\n value = prev_value + graph.node_weights[ii][1] - graph.node_weights[ii][0]\n for node in prev_nodes: # complementarity\n for adjacent in graph.graph[node]:\n if adjacent[0] == ii:\n value += adjacent[1]\n all_values.append(value)\n # recurse\n combinations(graph, all_combs, all_costs, all_values, ii+1, cost, value, nodes)",
"def global_decomposition(iterable = None):\n\tfor graph in iterable:\n\t\tyield graph_decomposition(graph = graph)",
"def generate_subgraph(format):\n\n # get business information\n directorypath = genpath+directory\n if os.path.isfile(directorypath):\n \n bizdata = pd.read_csv( directorypath, escapechar='\\\\')\n\n #create a directory of page-id and object-ids\n tempdf = bizdata.set_index('pageid')\n tempdf = tempdf['objectid']\n dictionary = tempdf.to_dict()\n\n uncgraph = pd.read_csv(inpath+graphfile, escapechar='\\\\')\n uncgraph = uncgraph.dropna()\n uncgraph['likee_object_id'] = uncgraph.apply(lambda x: dictionary.get(x['likee_page_id']), axis=1)\n cgraph = uncgraph.dropna()\n cgraph = cgraph[['liker_page_id', 'likee_page_id']]\n cgraph.columns = ['Source', 'Target']\n\n \n print_stats(cgraph)\n if format == 'networkx' :\n print \"[Generating a networkX graph...]\" \n cgraph.to_csv(genpath+subgraph+'.ntx', index=False, header=False, sep= ' ')\n else:\n print \"[Generating a csv graph...]\" \n cgraph.to_csv(genpath+subgraph+'.csv', index=False)\n\n\n else:\n print \"Either file is missing or is not readable\"",
"def sub_graph_merging(self):",
"def build_graph(self):\n pass",
"def chang_graphs():\n g1 = Graph(\"[}~~EebhkrRb_~SoLOIiAZ?LBBxDb?bQcggjHKEwoZFAaiZ?Yf[?dxb@@tdWGkwn\",\n loops=False, multiedges=False)\n g2 = Graph(\"[~z^UipkkZPr_~Y_LOIiATOLBBxPR@`acoojBBSoWXTaabN?Yts?Yji_QyioClXZ\",\n loops=False, multiedges=False)\n g3 = Graph(\"[~~vVMWdKFpV`^UGIaIERQ`\\DBxpA@g`CbGRI`AxICNaFM[?fM\\?Ytj@CxrGGlYt\",\n loops=False, multiedges=False)\n return [g1,g2,g3]",
"def gen_graph():\n if config_pagination:\n gdata = tgraph.call_graph(offset=offset, limit=limit)\n else:\n gdata = tgraph.call_graph(start=start, end=end, contineous=contineous)\n\n for data in gdata:\n yield data",
"def graphs_conn_iso(n):\n def graphs_conn_helper(n):\n for oldg in graphs_conn_iso(n-1):\n for s in powerset(range(n-1)):\n if s == ():\n continue\n g = oldg + [list(s)]\n for v in s:\n g[v] = g[v] + [n-1]\n # NOT g[v] += ... or g[v].append(...)\n # to avoid changing items in oldg\n yield g\n\n assert n >= 0\n if n >= 3:\n for g in unique_iso(graphs_conn_helper(n)):\n yield g\n elif n == 2:\n yield [ [1], [0] ]\n elif n == 1:\n yield [ [] ]\n else: # n == 0\n yield []",
"def generateNumsets(G):\n # paths = []\n #\n # path = [0]\n # for edge in nx.dfs_edges(G, 0):\n # if edge[0] == path[-1]:\n # path.append(edge[1])\n # else:\n # paths.append(path)\n # search_index = 2\n # while search_index <= len(path):\n # if edge[0] == path[-search_index]:\n # path = path[:-search_index + 1] + [edge[1]]\n # break\n # search_index += 1\n # else:\n # raise Exception(\"Wrong path structure?\", path, edge)\n # paths.append(path)\n # return paths\n\n \"\"\"\n Trying to use itertools LMAO\n \"\"\"\n # paths = []\n #\n # for path in itertools.combinations(G.nodes, 5):\n # paths.append(path)\n # return paths\n\n \"\"\"\n Generating paths using graph\n \"\"\"\n paths = []\n n = len(G.nodes)\n for source in range(n):\n for target in range(source+1, n):\n paths.extend([path for path in nx.all_simple_paths(G, source=source, target=target)])\n return paths\n\n # return paths",
"def loops(graph = None):\n\tunknown_structs = []\n\tcompound_structs = []\n\tloops_dict = create_components_dict()\n\tfor subgraph in nx.connected_component_subgraphs(graph):\n\t\tif subgraph.number_of_nodes() < 3:\n\t\t\tunknown_structs.append(subgraph)\n\t\telse:\n\t\t\tif connectivity_threshold(graph = subgraph) > 2 or loop_type(graph= subgraph) == 'NA':\n\t\t\t\tcompound_structs.append(subgraph)\n\t\t\telse:\n\t\t\t\tloops_dict[loop_type(graph= subgraph)].append(subgraph)\n\treturn loops_dict",
"def generate_graph(comments):\n for comment in comments:\n topic['all_comments'].append(comment)\n parent = topic['graph'].setdefault(comment['parentId'], [])\n parent.append(comment['id'])\n generate_graph(comment['children'])"
]
| [
"0.71944106",
"0.658198",
"0.6509555",
"0.64392537",
"0.640974",
"0.6388047",
"0.6185399",
"0.61799556",
"0.61649305",
"0.61399496",
"0.6015442",
"0.60139364",
"0.6005733",
"0.6004381",
"0.599298",
"0.59746885",
"0.59744376",
"0.59430206",
"0.5927151",
"0.5919803",
"0.5884523",
"0.58819",
"0.5869414",
"0.58639544",
"0.58635885",
"0.5859531",
"0.5849843",
"0.58130944",
"0.58050823",
"0.5800776"
]
| 0.6886045 | 1 |
dp[i][j] represents the length of the longest palindrome subseq from s[i] to s[j], so the answer is dp[0][n - 1] | def longestPalindromeSubseq(self, s: str) -> int:
n = len(s)
        dp = [[0] * n for _ in range(n)]  # dp[i][j] = LPS length of s[i..j]; cells below the diagonal stay 0
for length in range(1, n + 1):
for i in range(n - length + 1):
j = i + length - 1
if length == 1:
dp[i][j] = 1
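                # Equal endpoints extend the inner palindrome by 2; otherwise drop one endpoint.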
elif s[i] == s[j]:
dp[i][j] = dp[i + 1][j - 1] + 2
else:
dp[i][j] = max(dp[i][j - 1], dp[i + 1][j])
return dp[0][n - 1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def longestPalindrome(self, s):\n if not s:\n return 0\n #init i and list\n i = 0\n singles = []\n while i < len(s):\n count_in_singles = singles.count(s[i])\n if count_in_singles > 0:\n singles.pop(singles.index(s[i]))\n else:\n singles.append(s[i])\n i += 1\n if len(singles) > 0:\n len_longest_palindrome = len(s) - len(singles) + 1\n else:\n len_longest_palindrome = len(s)\n return len_longest_palindrome",
"def get_longest_palindrome_subsequence(s):\n\n if not s:\n return ()\n\n n = len(s)\n lengths = __calc_lps_length(s, n)\n\n max_len = lengths[0][n]\n p = [None for _ in range(0, max_len)]\n __build_lps(s, lengths, 0, n, p, 0, len(p))\n\n return tuple(p)",
"def longest_palindromic_substring(s):\n longest = s[0] if len(s) > 0 else \"\"\n for i in range(len(s)):\n j = len(s)\n while s[i] in s[i+1:j] and j <= len(s):\n j = s[i + 1:j].rfind(s[i]) + i + 2\n print(i, j)\n if is_palindrome(s[i:j]) and len(longest) < len(s[i:j]):\n longest = s[i:j]\n j = len(s) + 1\n else:\n j -= 1\n if len(s) - len(longest) <= i:\n break\n return longest",
"def longestPalindrome(self, s: str) -> int:\n # approach #1 -- using hashset\n # approach 2 -- using hashmap\n hashmap = defaultdict(int)\n odd = 0\n out = 0\n for char in s:\n hashmap[char] += 1\n\n for key, val in hashmap.items():\n if val % 2 == 1:\n odd = 1\n out += (val -1)\n else:\n out += val\n return out +odd",
"def get_longest_palindrome(v,s):\n m,j = max( (x,i) for i,x in enumerate(v) )\n start = j//2 - m//2\n return s[start:start+m]",
"def countPalindromicSubsequences(self, s: str) -> int:\n MOD = 10 ** 9 + 7\n \n def dp(i, j) -> (int, set):\n distinct = set()\n if i > j:\n return (0, distinct)\n if i == j:\n distinct.add(s[i])\n return (1, distinct)\n ret = 0\n for c in 'abcd':\n l = s.find(c, i, j)\n if l < 0:\n continue\n r = s.rfind(c, i, j)\n sub_ret, sub_set = dp(l, r)\n print(sub_ret, sub_set)\n # print(f'{c}-{sub_set}-{c}')\n ret += sub_ret + 1\n ret %= MOD\n distinct.union(sub_set)\n distinct.add(c)\n\n return ret, distinct\n return dp(0, len(s))[0]",
"def countPalindromicSubsequences(self, S):\n if not S:\n return 0\n\n ways = [[0] * len(S) for i in range(len(S))]\n\n # base cases: for subarray of length 1 and 2\n for i in range(len(S)):\n ways[i][i] = 1\n if i < len(S) - 1:\n ways[i][i+1] = 2\n\n for ll in range(3, len(S)+1):\n for i in range(len(S) - ll + 1):\n j = ll + i - 1\n if S[i] != S[j]:\n ways[i][j] = ways[i+1][j] + ways[i][j-1] - ways[i+1][j-1]\n else:\n l = i + 1\n while l < j and S[l] != S[i]:\n l += 1\n r = j - 1\n while r > i and S[r] != S[j]:\n r -= 1\n\n if l < r:\n ways[i][j] = 2 * ways[i+1][j-1] - ways[l+1][r-1]\n elif l == r :\n ways[i][j] = 2 * ways[i+1][j-1] + 1\n else:\n ways[i][j] = 2 * ways[i+1][j-1] + 2\n return ways[0][len(S)-1] % (10**9 + 7)",
"def longest_palindrome(string):\n if is_palindrome(string):\n return string\n\n max_len = len(string) - 1\n\n while max_len > 0:\n i = 0\n while (i + max_len) <= len(string):\n sub = string[i:(i + max_len)]\n if is_palindrome(sub):\n return sub\n i += 1\n max_len -= 1",
"def longestPalindrome(self, s: str) -> str:\n # Basic validations\n self.validator.validate_word_length(s)\n self.validator.validate_word_char_types(s)\n\n start = 0\n end = 0\n for i in range(len(s)):\n left_pos = self.expand_around_center(s, i, i)\n right_pos = self.expand_around_center(s, i, i + 1)\n length = max(left_pos, right_pos)\n if length > (end - start):\n start = i - (length - 1) // 2\n end = i + (length // 2)\n\n max_palindrome = list(s)[start : end + 1]\n return ''.join(max_palindrome)",
"def computeLongestPalindromeLength(text):\n # BEGIN_YOUR_CODE (our solution is 19 lines of code, but don't worry if you deviate from this)\n def isPal(t):\n if len(t) == 1:\n return True\n if t[0] == t[-1] and isPal(t[1:-1]):\n return True\n return False\n for size in range(len(text),0,-1):\n for c in combinations(s,size):\n if isPal(''.join(l for l in c)):\n return ''.join(l for l in c)\n # END_YOUR_CODE",
"def longest_palindrome(a):\n # This is some spaghetti if I've ever seen some. In the case where the whole work is\n # a palindrome, we can go ahead and skip. BUT ONLY IN THAT INSTANCE BECAUSE IT'S BAD\n if palindrome_golf(a):\n return a\n\n # Enter the bad\n max_so_far = ''\n for length in range(1,len(a)-1):\n start = 0\n while start <= len(a) - 1:\n sub = a[start:start+length]\n cur = palindrome_golf(sub)\n if cur is True and len(sub) > len(max_so_far):\n max_so_far = sub\n start += 1\n\n return max_so_far",
"def computeLongestPalindromeLength(text):\n n = len(text)\n if n == 0:\n return 0\n\n maxLen = [[0] * n for _ in range(n)]\n for i in reversed(range(n)):\n maxLen[i][i] = 1\n for j in range(i + 1, n):\n if text[i] == text[j]:\n maxLen[i][j] = maxLen[i+1][j-1] + 2\n else:\n maxLen[i][j] = max(maxLen[i+1][j], maxLen[i][j-1])\n return maxLen[0][n-1]",
"def find_longest_palindromic_string(text):\n n = len(text)\n start = 0\n max_len = 1\n matrix = [[False for _ in range(n)] for _ in range(n)]\n # all palindrome of length 1\n for i in range(n):\n matrix[i][i] = True\n # check palindrome of length 2\n for i in range(n-1):\n if text[i] == text[i + 1]:\n matrix[i][i + 1] = True\n start = i\n max_len = 2\n # check palindrome of length 3 or more\n for length in range(3, n):\n for i in range(n-length+1):\n j = i + length - 1\n if text[i] == text[j] and matrix[i+1][j-1]:\n matrix[i][j] = True\n start = i\n max_len = length\n return text[start: start + max_len]",
"def LPSubsequence(str):\n if str is None or len(str) == 0:\n return \"\"\n\n sl = len(str) # sl is string length\n\n # Create a table to store results of subproblems\n L = [[0 for x in range(sl)] for x in range(sl)]\n\n # Create palindrome of 1 for each character in input string (a)\n for i in range(sl):\n L[i][i] = str[i]\n\n # cl is check string length\n for cl in range(2, sl + 1):\n for start in range(sl - cl + 1):\n stop = start + cl - 1\n first = str[start]\n last = str[stop]\n if first == last and cl == 2:\n L[start][stop] = first * 2\n elif first == last:\n L[start][stop] = first + L[start + 1][stop - 1] + last\n else:\n L[start][stop] = LPSubsequenceLongest(\n L[start][stop - 1], L[start + 1][stop])\n\n return L[0][sl - 1]",
"def isSubsequenceDP(self, s: str, t: str) -> bool:\n if len(s) == 0:\n return True\n if len(t) == 0:\n return False\n\n n = len(s)\n m = len(t)\n\n dp = [[0 for i in range(m)] for _ in range(n)]\n dp[0][0] = 1 if s[0] == t[0] else 0\n\n for i in range(1, n):\n if s[i] == t[0]:\n dp[i][0] = 1\n else:\n dp[i][0] = dp[i-1][0]\n\n for i in range(1, m):\n dp[0][i] = 1 if t[i] == s[0] else dp[0][i-1]\n\n for i in range(1, n):\n for j in range(1, m):\n if s[i] == t[j]:\n dp[i][j] = max(dp[i-1][j-1] + 1, dp[i][j-1])\n else:\n dp[i][j] = dp[i][j-1]\n\n return dp[-1][-1] == len(s)",
"def substrCount(n, s):\r\n lst = []\r\n character = s[0]\r\n count = 1\r\n result = 0\r\n for i in range(1, n):\r\n if s[i] == character:\r\n count += 1\r\n else:\r\n lst.append((character, count))\r\n character = s[i]\r\n count = 1\r\n lst.append((character, count))\r\n\r\n for tpl in lst:\r\n \"\"\"calculate all possible palindromes created from same characters that are close to each other\r\n E.g: aaa => 6 possibles (3*4//2 = 6)\r\n \"\"\"\r\n result += tpl[1] * (tpl[1] + 1) // 2\r\n\r\n for i in range(1, len(lst) - 1):\r\n if lst[i - 1][0] == lst[i + 1][0] and lst[i][1] == 1:\r\n \"\"\"\r\n check palindromes created from 3 tuples with a different character in between\r\n \"\"\"\r\n result += min(lst[i - 1][1], lst[i + 1][1])\r\n\r\n return result",
"def shortestPalindrome(self, string):\n\t\tif not string:\n\t\t\treturn ''\n\t\tright = 0\n\t\tcenter = 0\n\t\tdataString = string\n\t\tstring = self.interleave(string)\n\t\tdps = [0] * len(string)\n\t\t\n\t\tfor i in range(1, len(string)):\n\t\t\tmirror = 2*center - i\n\t\t\tif i + dps[mirror] < right:\n\t\t\t\tdps[i] = dps[mirror]\n\t\t\telse:\n\t\t\t\tcenter = i\n\t\t\t\tmirror = 2 * center - right - 1\n\t\t\t\tridx = right + 1\n\t\t\t\t# print (i, center, right, mirror)\n\t\t\t\twhile ridx < len(string):\n\t\t\t\t\tif mirror >= 0 and string[mirror] == string[ridx]:\n\t\t\t\t\t\tmirror -= 1\n\t\t\t\t\t\tridx += 1\n\t\t\t\t\telse :\n\t\t\t\t\t\tbreak\n\t\t\t\t# print (i, center, ridx, mirror)\n\t\t\t\tright = ridx - 1\n\t\t\t\tdps[i] = right - i\n\n\t\t# print (string)\n\t\tidx = len(dps) - 1\n\t\twhile idx > 0:\n\t\t\tif idx == dps[idx]:\n\t\t\t\tbreak\n\t\t\tidx -= 1\n\t\t# print (idx, 'idx')\n\t\treturn dataString[:idx - 1 - len(dataString): -1] + dataString",
"def longestIncreasingSubsequence(nums):\n if not nums:\n return 0\n \n dp = [None] * len(nums)\n dp[0] = 1\n maxans = 1\n \n for i in range(1, len(dp)):\n maxval = 0\n for j in range(0, i):\n if nums[i] > nums[j]:\n maxval = max(maxval, dp[j])\n \n dp[i] = maxval + 1\n maxans = max(maxans, dp[i])\n \n return maxans",
"def computeLongestPalindrome(text):\n # BEGIN_YOUR_CODE (our solution is 19 lines of code, but don't worry if you deviate from this)\n cache = {}\n def recurse(m,n, text):\n if (m,n) in cache:\n result = cache[(m,n)]\n elif m==n:\n result = 1\n elif m > n:\n result = 0\n elif text[m] == text[n]:\n result = 2 + recurse(m+1, n-1, text)\n else:\n result1 = recurse(m+1,n, text)\n result2 = recurse(m,n-1, text)\n result = max(result1, result2)\n\n \n cache[(m,n)] = result\n return result\n\n return recurse(0, len(text)-1, text)\n\n\n\n # END_YOUR_CODE",
"def my_dp_O_N_FAILED(self, s):\n if s[0] == '0':\n return 0\n if len(s) == 1:\n return 1\n \n \"\"\" Following len(s) >= 2 \"\"\"\n n = len(s)\n dp = [0] * n\n \n dp[0] = 1\n # dp[1] = \n dp[1] = 1 if (int(s[0:2]) > 26 or s[1] == '0') else 2\n if 1 <= int(s[0:2]) <= 26:\n dp[1] = 2\n else:\n if s[1] == '0':\n dp[1] = 0\n else:\n dp[1] = 1\n \n for i in range(2, len(s)+1):\n if s[i] != '0':\n dp[i] += dp[i-1]\n if 10 <= int(s[i-1 : i+1]) <= 26:\n dp[i] += dp[i-2]\n return dp[n-1]",
"def longestAwesome(self, s: str) -> int:\n\n # So we are moving right, and reducing length by 1\n # for every time we move right - we start from the longest substring that can be formed to lowest one\n # So the moment, we find something we can instantly breal\n\n max_length = 0\n\n if s == s[::-1]:\n return len(s)\n\n for i in range(0, len(s)):\n left = i\n right = len(s)\n\n if right - left > max_length:\n\n while right > left:\n\n candidate = s[left:right]\n # print(f\"The candidate is: {candidate}\")\n ctr = Counter(candidate)\n\n # initial base check\n odd_cnt = 0\n fl = False\n for k, v in ctr.items():\n if v & 1:\n odd_cnt += 1\n if odd_cnt > 1:\n fl = True\n break\n\n if not fl:\n if max_length < (right - left):\n max_length = right - left\n # max_length = max(max_length, len(candidate))\n\n right -= 1\n\n return max_length",
"def is_palindrome_v3(s):\n i = 0\n j = len(s)-1\n\n while i < j and s[i] == s[j]:\n i = i + 1\n j = j -1\n\n return j <= i",
"def is_palindrome_v2(s):\n n = len(s)\n\n return s[:n/2] == reverse(s[n-n/2:])",
"def probl4():\n\n largest_palindrome = 0\n for i in xrange(101, 1000):\n for j in xrange(101, 1000):\n output = i * j\n if str(output) == str(output)[::-1] and \\\n output > largest_palindrome:\n largest_palindrome = output\n return largest_palindrome",
"def propagate(v,s):\n # if the palindrome at the current center expends until the\n # end of s, we have a certain length for all subpalindromes\n # to the right of this center\n suffix = is_suffix(v,s)\n\n # consider the length of the palindrome\n # centered at v's tail in order to further populate\n # v with what we can for sure predict\n l = v[-1]\n c = len(v)-1\n for j in reversed(range(c-l+1, c)):\n pre = is_prefix(c,j,v)\n if not pre or suffix: # we have a definite answer\n v.append(v[j])\n else: # pre and no suffix\n break\n # if it's a prefix, we can only give lower\n # bounds, but we'd have to check the palindrome's\n # length anyway, so we don't get much.",
"def reverse(s):\n flag=0\n n=len(s)\n for i in range(len(s)):\n if s[i]!=s[n-i-1]:\n flag=1\n return -1\n return 1",
"def lengthOfLongestSubstringTwoDistinct(self, s):\n if not s:\n return 0\n\n counters = collections.Counter()\n\n start = end = 0\n max_len = -1\n char_count = 0\n while end < len(s):\n if counters[s[end]] == 0:\n char_count += 1\n counters[s[end]] += 1\n\n while char_count > 2:\n counters[s[start]] -= 1\n if counters[s[start]] == 0:\n char_count -= 1\n start += 1\n\n if max_len < end - start + 1:\n max_len = end - start + 1\n\n return max_len",
"def lengthOfLIS(self, nums: List[int]) -> int:\n# time complexity: O(n^2), space complexity: O(n)\n# this is inspired by the solution provided by the question.\n# dp\n# the idea is to use a list longest to record say i-th element in nums, if as the last of the longest possible subsquence, how long the subsquence would be.\n \n\n# time complexity: O(nlogn), space complexity: O(n)\n# dp with binary search\n# the key idea is to use a list to store the longest possible sequence, but the element in the list is not necessarily correct. Every element say record_long[i] in the list means the end of longest subsequence of length i+1\n# this is inspired by @bolinq in the discussion area.\n import bisect\n record_long = []\n for num in nums:\n index = bisect.bisect_left(record_long, num)\n if index == len(record_long):\n record_long.append(num)\n else:\n record_long[index] = num\n \n return len(record_long)",
"def num_palindrome():\n nums = map(str, range(1000000))\n odo = []\n for i in range(len(nums)):\n if len(nums[i]) < 6:\n odo.append('0'*(6-len(nums[i])) + nums[i])\n elif len(nums[i]) == 6:\n odo.append(nums[i])\n \n for i in range(len(odo)-3): \n first = odo[i][2:] == odo[i][:1:-1]\n second = odo[i+1][1:] == odo[i+1][:0:-1]\n third = odo[i+2][1:5] == odo[i+2][4:0:-1]\n fourth = odo[i+3][:] == odo[i+3][::-1]\n if first & second & third & fourth:\n print 'A possible odometer reading is '+odo[i]",
"def lengthOfLongestSubstring(self, s):\n longest = 0\n i = 0\n j = 0\n n = len(s)\n seen = {}\n while i < n and j < n:\n c = s[j]\n if c in seen:\n i = seen[c] + 1\n seen[c] = j\n j += 1\n longest = max(longest, j-i)\n\n return longest"
]
| [
"0.7569497",
"0.739337",
"0.7330967",
"0.7084973",
"0.7000375",
"0.6921926",
"0.68645984",
"0.68183255",
"0.6751637",
"0.66429085",
"0.65716267",
"0.6562179",
"0.65591437",
"0.6377998",
"0.6305075",
"0.6299351",
"0.62958795",
"0.62937003",
"0.6278404",
"0.6270836",
"0.6138917",
"0.61287856",
"0.6106785",
"0.60841626",
"0.60622746",
"0.60477006",
"0.60366255",
"0.59925044",
"0.5978511",
"0.59773505"
]
| 0.8747813 | 0 |
Sets GoogleMediaItem object attributes to values given in dictionary | def from_dict(self, dictionary):
required_keys = ['filename', 'id', 'baseUrl']
assert all(key in list(dictionary.keys()) for key in required_keys), \
'Dictionary missing required key. GoogleMediaItem.from_dict() ' \
'requires keys: {}'.format(required_keys)
self.name = dictionary['filename']
self.id = dictionary['id']
self.base_url = dictionary['baseUrl'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, data):\n # add play_guid as it sometimes doesn't exist\n if 'play_guid' not in data:\n data['play_guid'] = ''\n # loop through data\n for x in data:\n # set information as correct data type\n mlbgame.object.setobjattr(self, x, data[x])",
"def _set_attributes(self):",
"def __setitem__(self, name, value):\n self.gattrs[name] = value",
"def __setitem__(self, key, item):\n self.attrib[key] = item",
"def __setitem__(self, key, item):\n self.attrib[key] = item",
"def _set_attrs(ds, **attrs_map):\n for key in attrs_map:\n val = attrs_map[key] # Use Python 2/3 agnostic style\n ds.attrs[key] = val",
"def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})",
"def __setitem__(self, item, value):\r\n debug.write(\"[SourceRPG] Assigning attribute %s with the value of %s to player %s\" % (item, value, self.name), 3)\r\n if item in self.currentAttributes:\r\n debug.write(\"Value is in current attributes, assign to the currentAttributes dict\", 4)\r\n self.currentAttributes[item] = value\r\n elif item in self.currentSkills or item in skills:\r\n debug.write(\"Value is in skills, assign to the currentSkills dict\", 4)\r\n self.currentSkills[item] = value\r\n else:\r\n debug.write(\"Value is not in any dictionary, assign to the custom playerAttributes dict\", 4)\r\n self.playerAttributes[item] = value\r\n debug.write(\"[SourceRPG] Value updated\", 3)",
"def set_media(link):\r\n results = {}\r\n make_link_info_job(results, link, g.useragent)()\r\n update_link(link, *results[link])",
"def changeattributes(self, *_, inplace = True, **kwa) -> 'TracksDict':\n assert len(_) == 0\n this = self if inplace else self.clone()\n for track in this.values():\n for i, j in kwa.items():\n setattr(track, i, j)\n return this",
"def _apply_harmonized_metadata_to_sample(sample: Sample, harmonized_metadata: dict):\n for key, value in harmonized_metadata.items():\n setattr(sample, key, value)",
"def updateFromDict(self, data):\n for key, value in data.items():\n setattr(self, key, value)",
"def set_attrs(dict, elem, attrs):\n for attr in attrs:\n if attr in elem.keys():\n dict[attr] = elem.get(attr)",
"def updateAttrs(self, kwargs):\n for k, v in kwargs.iteritems():\n setattr(self, k, v)",
"def set_metadata(d, metadata):\n for data in metadata:\n d = set_dict_attrs(d, {'.'.join(data.keys()[0].split('.')[1:]): data.values()[0]})\n return d",
"def setItunesAttribute(self,key,value):\n self.itunesAttributes[key] = value",
"def mset(self, dict_attrs, **kwargs):\n\n query_str = '?xsiType=%s' % (quote(self._get_datatype())) + ''.join(['&%s=%s' % (quote(path),\n quote(val)\n )\n for path, val in dict_attrs.items()\n ]\n )\n\n put_uri = self._eobj._uri + query_str\n\n self._intf._exec(put_uri, 'PUT', **kwargs)",
"def change_metadata(self, **kwargs):\n metadata = self.state.get_player_state(PLAYER_IDENTIFIER)\n\n # Update saved metadata\n for key, value in kwargs.items():\n setattr(metadata, key, value)\n\n # Create a temporary metadata instance with requested parameters\n change = PlayingState(**kwargs)\n self.state.item_update(change, PLAYER_IDENTIFIER)",
"def store_metadata(media_entry, metadata):\n # Let's pull out the easy, not having to be converted ones first\n stored_metadata = dict(\n [(key, metadata[key])\n for key in [\n \"videoheight\", \"videolength\", \"videowidth\",\n \"audiorate\", \"audiolength\", \"audiochannels\", \"audiowidth\",\n \"mimetype\"]\n if key in metadata])\n\n # We have to convert videorate into a sequence because it's a\n # special type normally..\n\n if \"videorate\" in metadata:\n videorate = metadata[\"videorate\"]\n stored_metadata[\"videorate\"] = [videorate.num, videorate.denom]\n\n # Also make a whitelist conversion of the tags.\n if \"tags\" in metadata:\n tags_metadata = metadata['tags']\n\n # we don't use *all* of these, but we know these ones are\n # safe...\n tags = dict(\n [(key, tags_metadata[key])\n for key in [\n \"application-name\", \"artist\", \"audio-codec\", \"bitrate\",\n \"container-format\", \"copyright\", \"encoder\", \n \"encoder-version\", \"license\", \"nominal-bitrate\", \"title\",\n \"video-codec\"]\n if key in tags_metadata])\n if 'date' in tags_metadata:\n date = tags_metadata['date']\n tags['date'] = \"%s-%s-%s\" % (\n date.year, date.month, date.day)\n\n # TODO: handle timezone info; gst.get_time_zone_offset +\n # python's tzinfo should help\n if 'datetime' in tags_metadata:\n dt = tags_metadata['datetime']\n tags['datetime'] = datetime.datetime(\n dt.get_year(), dt.get_month(), dt.get_day(), dt.get_hour(),\n dt.get_minute(), dt.get_second(),\n dt.get_microsecond()).isoformat()\n \n metadata['tags'] = tags\n\n # Only save this field if there's something to save\n if len(stored_metadata):\n media_entry.media_data_init(\n orig_metadata=stored_metadata)",
"def set_dict(self, dic): # -> None:\n ...",
"def set_attributes(self, settings):\n\n for key, value in settings.items():\n self.__dict__[key] = value",
"def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id): # lint-amnesty, pylint: disable=arguments-differ\n def _internal_method(all_assets, asset_idx):\n \"\"\"\n Update the found item\n \"\"\"\n if asset_idx is None:\n raise ItemNotFoundError(asset_key)\n\n # Form an AssetMetadata.\n mdata = AssetMetadata(asset_key, asset_key.path)\n mdata.from_storable(all_assets[asset_idx])\n mdata.update(attr_dict)\n\n # Generate a Mongo doc from the metadata and update the course asset info.\n all_assets.insert_or_update(mdata)\n return all_assets\n\n self._update_course_assets(user_id, asset_key, _internal_method)",
"def set_attr_from_dict(self, dictionary):\n for key in dictionary:\n self.__setattr__(key, dictionary.get(key))",
"def test_fromDictEntitiesMediaSizes(self):\n\n status = platform.Status.fromDict(self.data[1])\n mediaItem = status.entities.media[0]\n self.assertEquals(700, mediaItem.sizes.large.w)\n self.assertEquals(466, mediaItem.sizes.large.h)\n self.assertEquals('fit', mediaItem.sizes.large.resize)",
"def set_attrs(self, location, attr_dict):\r\n for attr in attr_dict.iterkeys():\r\n if attr in ['_id', 'md5', 'uploadDate', 'length']:\r\n raise AttributeError(\"{} is a protected attribute.\".format(attr))\r\n asset_db_key = self.asset_db_key(location)\r\n # FIXME remove fetch and use a form of update which fails if doesn't exist\r\n item = self.fs_files.find_one(asset_db_key)\r\n if item is None:\r\n raise NotFoundError(asset_db_key)\r\n self.fs_files.update(asset_db_key, {\"$set\": attr_dict})",
"def setAttributes(self, attrDict):\n self.graph.saveExtendedAttributes(self.entityId, attrDict)",
"def set_properties(struct):",
"def enable_meta_data(item) : \n assert(has_value(item))\n\n if not item_has_meta(item) : \n \n # Wrap simple types to allow attributes to be added to them.\n if isinstance(item, str) : item = str_wrapper(item)\n elif isinstance(item, float) : item = float_wrapper(item)\n elif isinstance(item, int) : item = int_wrapper(item)\n elif isinstance(item, list) : item = list_wrapper(item)\n elif isinstance(item, dict) : item = dict_wrapper(item)\n \n setattr(item, META_ATTRIBUTE, Properties())\n \n return item",
"def update(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def __attrs_post_init__(self):\n self.key = uuid.uuid4().hex\n if self.properties is None:\n self.properties = {}\n if self.is_image:\n try:\n img_size = Image.open(self.open()).size\n self.properties.update(width=img_size[0], height=img_size[1])\n except IOError:\n self.content_type = 'application/octet-stream'"
]
| [
"0.60931814",
"0.6062052",
"0.60582787",
"0.5953111",
"0.5953111",
"0.5855602",
"0.5801648",
"0.5776378",
"0.5689186",
"0.5677398",
"0.5652272",
"0.5617211",
"0.56029904",
"0.55923235",
"0.5581952",
"0.55641544",
"0.5533538",
"0.5525152",
"0.55217475",
"0.5504772",
"0.54800445",
"0.54646546",
"0.5441283",
"0.54376256",
"0.5418796",
"0.54165787",
"0.54065335",
"0.540339",
"0.53933907",
"0.5391684"
]
| 0.6487751 | 0 |
Name of stages to initialize as string or list (not tuple!) or None to skip. Skip single axes with "" or None as item in the list. | def stages(self, stages):
if stages is None:
self._stages = None
else:
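            # A single stage name is broadcast to every axis of the controller.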
self._stages = stages if isinstance(stages, list) else [stages] * len(self.pidevice.allaxes)
debug('ControllerStartup.stages = %s', itemstostr(self._stages)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def skip_stage(stage):\n stage_values[stage] = 0\n stage_maxes[stage] = 0\n canvas.itemconfig(\n 'text_' + stage,\n text=stage_names[stage] + ': ' + trans_skipped,\n )\n bar_length(stage, 1) # Force stage to be max filled.\n canvas.delete('tick_' + stage)\n canvas.update()",
"def optional_input_names(self) -> List[Union[str, int]]:\n return [x.name or i for i, x in enumerate(self.inputs) if x.optional]",
"def step_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"step_name\")",
"def _default_axis_names(n_dims):\n _DEFAULT_NAMES = (\"z\", \"y\", \"x\")\n return _DEFAULT_NAMES[-n_dims:]",
"def stage_name(self) -> str:\n return self._values.get(\"stage_name\")",
"def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")",
"def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")",
"def stage_mesh_axis(self):\n stage_mesh_axis = None\n p = self.params\n if p.mesh_axis_names is not None:\n stage_mesh_axis = base_layer.to_partition_spec(\n p.weight_split_dims_mapping.stages, p.mesh_axis_names\n )[0]\n return stage_mesh_axis",
"def generateStageInList( self, StageInData ):\n StageInList = [ StageInData['EmptyFileName'] ]\n return StageInList",
"def stage_list(args):\n\n for stage in args.stages:\n print stage",
"def available_frames(self):\n if self._pipeline:\n #return [getattr(frame[0], \"name\", frame[0]) for frame in self._pipeline]\n return [step.frame if isinstance(step.frame, str) else step.frame.name for step in self._pipeline ]\n else:\n return None",
"def stage_name(stages_dir):\r\n\r\n print(\"\\nLEVELS AVAILABLE:\"\r\n \"\\n\")\r\n stages_dir = os.path.expanduser(stages_dir)\r\n os.chdir(stages_dir)\r\n stage_lst = next(os.walk('.'))[1]\r\n os.chdir(config_writer.tool_path)\r\n\r\n for name in stage_lst:\r\n print(name)\r\n while True:\r\n stg_nm = input(\"\\nEnter stage name: \")\r\n if stg_nm not in stage_lst:\r\n print(\"\\nStage name not available! Try again.\")\r\n else:\r\n break\r\n\r\n return stg_nm",
"def test_par_names_scalar_nonscalar():\n spec = {\n 'channels': [\n {\n 'name': 'channel',\n 'samples': [\n {\n 'name': 'goodsample',\n 'data': [1.0],\n 'modifiers': [\n {'type': 'normfactor', 'name': 'scalar', 'data': None},\n {'type': 'shapesys', 'name': 'nonscalar', 'data': [1.0]},\n ],\n },\n ],\n }\n ]\n }\n\n model = pyhf.Model(spec, poi_name=\"scalar\")\n assert model.config.par_order == [\"scalar\", \"nonscalar\"]\n assert model.config.par_names == [\n 'scalar',\n 'nonscalar[0]',\n ]",
"def stage_name(self) -> str:\n return self._stage_name",
"def optional(name):",
"def _stage_inputs(stage, phase):\n\n def arrayify(martian_io_field):\n \"\"\"Convert the type of a Martian input field to an array of that type.\n\n This is necessary for the join phase.\n \"\"\"\n return mro_parser.MartianIOField(\n martian_io_field.modifier,\n martian_io_field.type + '[]',\n martian_io_field.name,\n martian_io_field.help)\n\n def add_tag_to_name(martian_io_field, tag):\n return mro_parser.MartianIOField(\n martian_io_field.modifier,\n martian_io_field.type,\n martian_io_field.name + '_' + tag,\n martian_io_field.help)\n\n if phase == 'split':\n return stage.inputs\n elif phase == 'main':\n return stage.inputs + stage.splits\n elif phase == 'join':\n # The inputs to join are arrays of the split and output fields since it's pulling\n # together outputs of multiple main steps.\n # Also, \"split\" and \"output\" need to be added to the field names or there are collisions\n return stage.inputs + \\\n [add_tag_to_name(arrayify(s), \"split\") for s in stage.splits] + \\\n [add_tag_to_name(arrayify(s), \"output\") for s in stage.outputs]",
"def stage_states(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExecutionStageStateArgs']]]]:\n return pulumi.get(self, \"stage_states\")",
"def default_states_preprocessor(states):\n if len(states) == 1:\n np_states = np.expand_dims(states[0], 0)\n else:\n np_states = np.array([np.array(s, copy=False) for s in states], copy=False)\n return tf.convert_to_tensor(np_states)",
"def required_input_names(self) -> List[Union[str, int]]:\n return [x.name or i for i, x in enumerate(self.inputs) if not x.optional]",
"def test_no_prior(self):\n dim = Dimension(\"yolo\", None)\n print(dim._prior_name)\n assert dim.prior is None\n assert dim._prior_name == \"None\"",
"def get_steps_names(self) -> List[str]:\n return [step.Name.lower() for step in self.Sequence if isinstance(step, Step) and step.Name != \"\"]",
"def GroundExcelStartStageFileNameVector(builder, numElems):\n return StartStageFileNameVector(builder, numElems)",
"def setup(self, stage: Optional[str] = None) -> None:",
"def testGetStageNamesSmoke(self):\n stage = self.ConstructStage()\n self.assertEqual(stage.GetStageNames(), ['Builder'])",
"def list_stages():\n for name in Manager.STAGES:\n click.echo('{}'.format(name))",
"def get_fill_stages(self) -> Tuple[str]:\n return self._fill_stages",
"def layer_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"layer_name\")",
"def stage(self, stage):\n if stage is None:\n raise ValueError(\"Invalid value for `stage`, must not be `None`\")\n allowed_values = [\"starting\", \"completed\", \"in_progress\"]\n if stage not in allowed_values:\n raise ValueError(\n \"Invalid value for `stage` ({0}), must be one of {1}\"\n .format(stage, allowed_values)\n )\n\n self._stage = stage",
"def get_filename_from_stage(stage: str, device: TorchDevice) ->str:\n if stage not in [PREPROCESSOR, PREDICTOR, POSTPROCESSOR]:\n raise ValueError(f'Invalid stage: {stage}.')\n if stage == PREDICTOR:\n return f'inference_{stage}-{device}.pt'\n else:\n return f'inference_{stage}.pt'",
"def optional_data_names(\n cls, train: bool = True, inference: bool = False\n ) -> Tuple[str, ...]:\n raise NotImplementedError"
]
| [
"0.51637655",
"0.49777758",
"0.49761155",
"0.49150392",
"0.4887601",
"0.48852953",
"0.48852953",
"0.48343307",
"0.47870418",
"0.47764853",
"0.47706214",
"0.47684795",
"0.47631693",
"0.476252",
"0.46925962",
"0.4682446",
"0.466207",
"0.46271232",
"0.46085522",
"0.4608084",
"0.45821467",
"0.45713642",
"0.4531872",
"0.44739202",
"0.44523492",
"0.4446397",
"0.44345996",
"0.4428583",
"0.4422661",
"0.44155416"
]
| 0.5163081 | 1 |
Name of axes as list of strings or None. | def axesnames(self):
return self._axesnames | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def axesNames(self, data, info):\n return []",
"def allAxes( mv ):\n if mv is None: return None\n return mv.getAxisList()",
"def customAxisNames(self):\n return []",
"def axis_name(self):\n return self._axis_name",
"def _get_axes_numbers(self, axes):\n if axes is None:\n return [0, 1]\n\n if isinstance(axes, str):\n return [self._get_axis_number(axes)]\n elif hasattr(axes, '__len__'):\n return [self._get_axis_number(ax) for ax in axes]\n return [axes]",
"def _default_axis_names(n_dims):\n _DEFAULT_NAMES = (\"z\", \"y\", \"x\")\n return _DEFAULT_NAMES[-n_dims:]",
"def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n for axis in self.axisList:\n self.axesNames.append(labels[i] + ' - ' + axis.id)\n i += 1",
"def process_custom_axes(axis_names):\n return axis_names.strip().strip(\"'\").strip('\"').split(',')",
"def axesnames(self, axesnames):\n if axesnames is None:\n self._axesnames = None\n else:\n assert isinstance(axesnames, list), 'axesnames must be list'\n self._axesnames = axesnames\n debug('ControllerStartup.axesnames = %s', itemstostr(self._axesnames))",
"def get_axis_name(self, axis_id):\n if isinstance(axis_id, str):\n if axis_id in self.axes_names:\n return axis_id\n else:\n return None\n assert np.isreal(axis_id) and np.round(axis_id) == axis_id\n if axis_id >= 0 and axis_id < self.get_ndims():\n return self.axes_names[axis_id]\n else:\n return None",
"def names(self):\n labels = [\n \"$X_{%i}$\" % i if d.name is None else d.name\n for i, d in enumerate(self.dimensions)\n ]\n return labels",
"def get_data(self):\n return [self.axes]",
"def axes(self):\n return self._axes",
"def axes(self):\n return self._axes",
"def axes(self) -> np.ndarray: # array[Axes]\n return self._axes",
"def getaxeslist(pidevice, axes):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n axes = pidevice.axes if axes is None else axes\n if not axes:\n return []\n if not isinstance(axes, (list, set, tuple)):\n axes = [axes]\n return list(axes) # convert tuple to list",
"def canonicalize_axis_name(axis_name):\n if not axis_name:\n return []\n if (isinstance(axis_name, str) or\n not isinstance(axis_name, collections.Iterable)):\n return [axis_name]\n return list(axis_name)",
"def listInputDeviceAxes(*args, **kwargs)->List[AnyStr]:\n pass",
"def endog_names(self):\n return self.data.ynames",
"def series_names(self):\r\n return self.names",
"def figure_names(self) -> List[str]:\n return self._db_data.figure_names",
"def get_axes(self) -> VGroup:\n return self.axes",
"def get_axes(self) -> VGroup:\n return self.axes",
"def findaxisbyname(self, *args, **kwargs):\n return _coordsys.coordsys_findaxisbyname(self, *args, **kwargs)",
"def exog_names(self):\n return self.data.xnames",
"def name(self):\n return self._dimensions",
"def series_axis(self):\n return self.container['series_axis']",
"def getPlot(self):\n return self.axes",
"def _declare_auto_axes_idx(self):\n if not self.axes_idx:\n self.axes_idx = BiMapping(to_first=range(len(self.name_elements)), to_second=range(len(self.name_elements)))",
"def getElementName(self):\n return _libsbml.Dimensions_getElementName(self)"
]
| [
"0.7990254",
"0.69446003",
"0.69080454",
"0.65960664",
"0.65699506",
"0.6543819",
"0.6498505",
"0.64880943",
"0.6442048",
"0.6376614",
"0.63446236",
"0.6276407",
"0.6262554",
"0.6262554",
"0.6227108",
"0.6180861",
"0.6115117",
"0.6089225",
"0.5930945",
"0.5925059",
"0.5902349",
"0.58496207",
"0.58496207",
"0.58483154",
"0.5826402",
"0.5816212",
"0.57476246",
"0.5674617",
"0.56584525",
"0.56550294"
]
| 0.79235536 | 1 |
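The row above pairs the docstring-style query with a bare property getter, and the matching setter appears among its negatives, which together define the list-of-strings-or-None contract. A minimal self-contained sketch of that pattern (the class name and usage lines are illustrative assumptions, not part of the dataset):

```python
class AxesHolder:
    """Sketch only: axis names stored as a list of strings or None."""

    def __init__(self):
        self._axesnames = None  # None until explicitly configured

    @property
    def axesnames(self):
        """Name of axes as list of strings or None."""
        return self._axesnames

    @axesnames.setter
    def axesnames(self, axesnames):
        # Mirrors the validation in the setter shown among the negatives.
        if axesnames is None:
            self._axesnames = None
        else:
            assert isinstance(axesnames, list), 'axesnames must be list'
            self._axesnames = list(axesnames)


holder = AxesHolder()
assert holder.axesnames is None
holder.axesnames = ['X', 'Y']
assert holder.axesnames == ['X', 'Y']
```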
Call INI command if available. | def callini(self):
debug('ControllerStartup.callini()')
if not self.pidevice.HasINI() or self.prop['skipini']:
return
self.pidevice.INI() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_if_interactive(self):\n pass",
"def cmd_INTERFACE(self, line):\r\n config = InterfaceOptions(self.terminal)\r\n\r\n try:\r\n config.parseOptions(line)\r\n cmd = config.subCommand\r\n opts = config.subOptions if hasattr(config, 'subOptions') else {}\r\n except usage.UsageError as errortext:\r\n self.terminal.write(\"BUG in usage: {0}\".format(errortext))\r\n else:\r\n if cmd == 'add':\r\n if (opts['addr'] and opts['etag'] and opts['itag']\r\n and opts['itype'] and opts['icls']):\r\n self.callToUser('addInterface', 'robot', opts['etag'],\r\n opts['itag'], opts['itype'], opts['icls'],\r\n opts['addr'])\r\n elif (opts['etag'] and opts['itag'] and opts['itype'] and\r\n opts['icls']):\r\n self.callToUser('addInterface', 'robot', opts['etag'],\r\n opts['itag'], opts['itype'], opts['icls'])\r\n elif cmd == 'remove':\r\n if opts['etag'] and opts['itag']:\r\n self.callToUser('removeInterface', 'robot', opts['etag'],\r\n opts['itag'])",
"def UseCommandInterface(self, option):\n if option:\n #change prompt\n if self.sim42interp.cmd.currentObj and hasattr(self.sim42interp.cmd.currentObj, 'GetPath'):\n sys.ps1 = self.sim42interp.cmd.currentObj.GetPath() + '> '\n else:\n sys.ps1 = 'Illegal current object> ' \n \n #Say good bye\n self.shell.run(\"print '*************** Changed to Sim42 Command Interface ***************' \", prompt=0, verbose=0)\n \n #Change\n self.shell.interp = self.sim42interp\n\n else:\n #change prompt\n sys.ps1 = '>>> '\n sys.ps2 = '... '\n\n #Change\n self.shell.interp = self.origInterp \n \n #Say hello\n self.shell.run(\"print '*************** Back to python ***************' \", prompt=0, verbose=0)\n \n\n self.shell.autoCompleteKeys = self.shell.interp.getAutoCompleteKeys()",
"def test_invocations_ini():\n invocations = labeled.contents(label=\"invocations\")\n tox = Path(\"tox.ini\").read_text(encoding=\"utf-8\")\n gendir = \".gendir-ini\"\n assert gendir in invocations\n assert f\"output_directory = {gendir}\" in tox",
"def comando_inutiliza(self):\r\n if args.xml:\r\n # para inutilizacoes atraves de xml -- fazer teste\r\n return \r\n\r\n if not args.serie:\r\n parser.error(\"informe a opcao -s para serie\")\r\n if not args.notas:\r\n parser.error(\"informe a opcao -n para as notas\")\r\n\r\n self.inutiliza_por_lote(args.notas, args.serie, args.justificativa)",
"def Scan(self, argin):\n handler = self.get_command_object(\"Scan\")\n handler(argin)",
"def runin(cmd, stdin):\n result = subprocess.Popen(cmd,stdin=subprocess.PIPE)\n result.wait()\n return result.returncode",
"def check_interface(self, interface):\n\n command = \"ifconfig %s > /dev/null\" % interface\n return subprocess.call(command, shell=True)",
"def initialize_interface(self):\n self.args, self.phil_args = parse_command_args(self.iver,\n self.help_message).parse_known_args()\n ginp = InputFinder()\n\n # Check for type of input\n if not self.args.path: # No input\n parse_command_args(self.iver, self.help_message).print_help()\n if self.args.default: # Write out default params and exit\n help_out, txt_out = inp.print_params()\n print('\\n{:-^70}\\n'.format('IOTA Parameters'))\n print(help_out)\n return False, 'IOTA_XTERM_INIT: OUTPUT PARAMETERS ONLY'\n elif len(self.args.path) > 1: # If multiple paths / wildcards\n file_list = ginp.make_input_list(self.args.path)\n list_file = os.path.join(os.path.abspath(os.path.curdir), 'input.lst')\n with open(list_file, 'w') as lf:\n lf.write('\\n'.join(file_list))\n msg = \"\\nIOTA will run in AUTO mode using wildcard datapath:\\n\" \\\n \"{} files found, compiled in {}\\n\".format(len(file_list), list_file)\n self.iota_phil = inp.process_input(self.args, self.phil_args, list_file,\n 'auto', self.now)\n self.params = self.iota_phil.extract()\n\n else: # If single path, check type\n carg = os.path.abspath(self.args.path[0])\n if os.path.isfile(carg):\n ptype = ginp.get_file_type(carg)\n if ptype.lower() in ('raw image', 'image pickle'):\n msg = \"\\nIOTA will run in SINGLE-FILE mode using {}:\\n\".format(carg)\n mode = 'auto'\n elif ('iota' and 'settings' in ptype.lower()):\n msg = '\\nIOTA will run in SCRIPT mode using {}:\\n'.format(carg)\n mode = 'file'\n elif 'list' in ptype.lower():\n msg = \"\\nIOTA will run in AUTO mode using {}:\\n\".format(carg)\n mode = 'auto'\n else:\n pr = 'WARNING! File format not recognized. Proceed anyway? [Y/N] '\n unknown_file = raw_input(pr)\n if 'y' in unknown_file.lower():\n ftype = raw_input(\"File type? [image, list, or parameters] \")\n msg = \"\\nIOTA will run WITH DODGY input using {}:\\n\".format(carg)\n if 'par' in ftype:\n mode = 'file'\n else:\n mode = 'auto'\n else:\n print('Exiting...')\n return False, 'IOTA_XTERM_INIT_ERROR: Unrecognizable input!'\n elif os.path.isdir(carg):\n ptype = ginp.get_folder_type(carg)\n if ('image' and 'folder' in ptype.lower()):\n msg = \"\\nIOTA will run in AUTO mode using {}:\\n\".format(carg)\n mode = 'auto'\n else:\n msg = \"IOTA_XTERM_INIT_ERROR: No images in {}!\".format(carg)\n print(self.logo)\n print(msg)\n return False, msg\n\n # If user provided gibberish\n else:\n msg = \"IOTA_XTERM_INIT_ERROR: Invalid input! 
Need parameter filename \" \\\n \"or data folder.\"\n print(self.logo)\n print(msg)\n return False, msg\n\n # Initialize parameters for this command-line run\n self.iota_phil = inp.process_input(self.args, self.phil_args,\n carg, mode, self.now)\n self.params = self.iota_phil.extract()\n\n # Identify indexing / integration program and add to logo\n b_end = \" with {}\".format(str(self.params.advanced.processing_backend).upper())\n prg = \"{:>{w}}\".format(b_end, w=76)\n self.logo += prg\n print(self.logo)\n print('\\n{}\\n'.format(self.now))\n if msg != '':\n print(msg)\n\n if self.args.analyze is not None:\n print('ANALYSIS ONLY will be performed (analyzing run #{})'.format(\n self.args.analyze))\n self.analyze_prior_results('{:003d}'.format(int(self.args.analyze)))\n return False\n\n if self.params.mp.method == 'mpi':\n rank, size = get_mpi_rank_and_size()\n self.master_process = rank == 0\n else:\n self.master_process = True\n\n # Call function to read input folder structure (or input file) and\n # generate list of image file paths\n\n with prog_message(\"Reading input files\"):\n self.input_list = self.make_input_list()\n\n # Select range of images/objects if turned on\n if self.params.advanced.image_range.flag_on:\n self.input_list = self.select_image_range(self.input_list)\n\n # Pick a randomized subset of images/objects if turned on\n if self.params.advanced.random_sample.flag_on and \\\n self.params.advanced.random_sample.number < len(self.input_list):\n with prog_message(\"Selecting {} random images out of {} found\"\n \"\".format(self.params.advanced.random_sample.number,\n len(self.input_list))):\n self.input_list = self.select_random_subset(self.input_list)\n\n # Check for -l option, output list of input files and exit\n if self.args.list:\n list_file = os.path.abspath(\"{}/input.lst\".format(os.curdir))\n\n # Check if other files of this name exist under the current folder\n list_folder = os.path.dirname(list_file)\n list_files = [i for i in os.listdir(list_folder) if i.endswith(\".lst\")]\n if len(list_files) > 0:\n list_file = os.path.join(list_folder,\n \"input_{}.lst\".format(len(list_files)))\n\n msg = 'IOTA_XTERM_INIT: INPUT LIST ONLY option selected'\n print ('\\n{}'.format(msg))\n print ('Input list in {} \\n\\n'.format(list_file))\n with open(list_file, \"w\") as lf:\n for i, input_file in enumerate(self.input_list, 1):\n lf.write('{}\\n'.format(input_file))\n print (\"{}: {}\".format(i, input_file))\n lf.write('{}\\n'.format(input_file))\n print ('\\nExiting...\\n\\n')\n return False, msg\n\n return True, 'IOTA_XTERM_INIT: Initialization complete!'",
"def test_main_interactive(monkeypatch):\n url = StringIO(\n '%s\\n%s\\n%s\\n%s' %\n (RANDOM_URL, RANDOM_USERNAME, RANDOM_PASSWORD, RANDOM_TENANTCODE))\n\n # Send std in and run the main function\n monkeypatch.setattr('sys.stdin', url)\n\n # Stage the empty args\n args = argparse.Namespace(url=None,\n username=None,\n password=None,\n tenantcode=None)\n\n result = Config(\"wso_interactive.json\").main(args)\n\n assert result is True",
"def Configure(self, argin):\n handler = self.get_command_object(\"Configure\")\n handler(argin)",
"def exe(self, inp):\n try:\n spl = shlex.split(inp)\n except:\n self.err_print('Mismatched quotations.')\n self.command_event.set()\n return\n\n if not spl:\n self.err_print(\"\")\n elif spl[0] in self.commands:\n self.err_print(\"\")\n self.commands[spl[0]](spl[1:])\n else:\n self.err_print('Invalid command: ' + spl[0])\n\n self.command_event.set()",
"def ex(cmd):\n exec cmd in user_ns()",
"async def inf(self, ctx):\n\n\t\traise MissingSubcommand()",
"def _cli():\n pass",
"def switch(self, an_input: str):\n\n # Empty command\n if not an_input:\n print(\"\")\n return None\n\n (known_args, other_args) = self.onchain_parser.parse_known_args(\n an_input.split()\n )\n\n # Help menu again\n if known_args.cmd == \"?\":\n self.print_help()\n return None\n\n # Clear screen\n if known_args.cmd == \"cls\":\n system_clear()\n return None\n\n return getattr(\n self, \"call_\" + known_args.cmd, lambda: \"Command not recognized!\"\n )(other_args)",
"def ipython(args):\n if \"-h\" not in args and \"--help\" not in args:\n ipython_message()\n call([\"ipython\"] + list(args))",
"def run_config(self, device, command, *argv, **kwarg):\n ############# Implement me ################\n if not IxnetworkIxiaClientImpl.ixnet:\n return 0, \"Ixia not connected\"\n params = kwarg[\"params\"]\n if not params or not params[0]:\n return 0, \"Need to specify config file name\"\n param = params[0]\n fname = param[\"config_file_name\"]\n name = os.path.basename(fname)\n if command == \"load_config\":\n files = IxnetworkIxiaClientImpl.session.GetFileList()\n found = False\n for f in files[\"files\"]:\n if f[\"name\"] == name:\n found = True\n break\n if not found:\n out = IxnetworkIxiaClientImpl.session.UploadFile(fname, name)\n out = IxnetworkIxiaClientImpl.ixnet.LoadConfig(Files(name))\n # get the traffic items back\n IxnetworkIxiaClientImpl.tis = IxnetworkIxiaClientImpl.ixnet.Traffic.TrafficItem.find()\n elif command == \"save_config\":\n out = IxnetworkIxiaClientImpl.ixnet.SaveConfig(Files(name))\n out += IxnetworkIxiaClientImpl.session.DownloadFile(name, fname)\n return 0, out",
"def is_in_cmd(self):\r\n return self.select_cmd is not None",
"def main():\n argument_spec = infinibox_argument_spec()\n null_list = list()\n argument_spec.update(\n dict(\n host=dict(required=True),\n state=dict(default='present', choices=['stat', 'present', 'absent']),\n wwns=dict(type='list', default=list()),\n iqns=dict(type='list', default=list()),\n )\n )\n\n module = AnsibleModule(argument_spec, supports_check_mode=True)\n\n if not HAS_INFINISDK:\n module.fail_json(msg=missing_required_lib('infinisdk'))\n\n check_options(module)\n execute_state(module)",
"def onecmd(self, cmdline):\n return self.default(cmdline)",
"def default_interface(dut,**kwargs):\n cli_type = st.get_ui_type(dut, **kwargs)\n\n if 'interface' not in kwargs:\n st.error(\"Mandatory arg interface is not present\")\n return False\n else:\n interface = kwargs['interface']\n\n skip_error = kwargs.pop('skip_error', False)\n command = ''\n\n if cli_type == 'klish':\n if 'range' in kwargs:\n command = command + \"\\n\" + \"default interface range {}\".format(interface)\n else:\n command = command + \"\\n\" + \"default interface {}\".format(interface)\n else:\n st.error(\"Invalid cli_type for this API - {}.\".format(cli_type))\n return False\n\n st.config(dut, command, type='klish',skip_error_check=skip_error)\n return True",
"def query_cmdline():",
"def do_con_read(self, *args):\n with suppress(SystemExit):\n command = self.cli.con_parser.parse_args(args)\n command.func(**vars(command))",
"def runCLI(self):\n\t\tself.available_cmds['help'].__call__()\n\n\t\twhile True:\n\t\t\tcmd = input('--> Enter Cmd: ')\n\t\t\tprint(\"\\n\")\n\t\t\tcmd = cmd.split()\n\n\t\t\tif len(cmd) > 0 and cmd[0] in self.available_cmds:\n\t\t\t\tif len(cmd) >= 1:\n\t\t\t\t\targs = cmd[1:]\n\t\t\t\telse:\n\t\t\t\t\targs = []\n\n\t\t\t\tself.available_cmds[cmd[0]].__call__(args)",
"def getopt():\n raise NotImplementedError()",
"def cli(self, env):\n raise NotImplementedError",
"def is_configured(command):\n return command in COMMANDS",
"async def ign(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)\n return",
"def execCommand(self, command, opts):\n if command == \"attach\":\n self.attachToComponent(opts)\n elif command == \"status\":\n self.getComponentStatus(opts)\n elif command == \"start\":\n self.startComponent(opts)\n elif command == \"stop\":\n self.stopComponent(opts)\n elif command == \"restart\":\n self.restartComponent(opts)\n else:\n print(\"Command %s not recognized\" % command)"
]
| [
"0.5661725",
"0.5556138",
"0.55064124",
"0.52995545",
"0.5288064",
"0.52753496",
"0.5252175",
"0.52297914",
"0.5213469",
"0.5204751",
"0.51983714",
"0.5183917",
"0.517432",
"0.5154659",
"0.51525563",
"0.51415825",
"0.51256984",
"0.5110826",
"0.51041824",
"0.5098343",
"0.50814635",
"0.50696725",
"0.5035948",
"0.5019679",
"0.5007186",
"0.5005632",
"0.49947602",
"0.49899346",
"0.4978247",
"0.49697387"
]
| 0.678965 | 0 |
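The callini() document above gates the INI command on a capability check (HasINI) and a user option (skipini). A hedged sketch of that guard using a hypothetical stand-in device (the real positive operates on a GCS controller object, which is not reproduced here):

```python
class FakeDevice:
    """Illustrative stand-in exposing only the two calls callini() needs."""

    def __init__(self, has_ini):
        self._has_ini = has_ini
        self.ini_called = False

    def HasINI(self):
        return self._has_ini

    def INI(self):
        self.ini_called = True


def callini(pidevice, skipini=False):
    # Same guard as in the document: skip if unsupported or explicitly disabled.
    if not pidevice.HasINI() or skipini:
        return
    pidevice.INI()


dev = FakeDevice(has_ini=True)
callini(dev)                          # INI fires
callini(FakeDevice(has_ini=False))    # skipped: controller lacks INI
callini(dev, skipini=True)            # skipped: user opted out
assert dev.ini_called
```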
Reset servo if it has been changed during referencing. | def resetservo(self):
debug('ControllerStartup.resetservo()')
if self.servostates is not None:
setservo(self.pidevice, self.servostates)
elif self._databuf['servobuf']:
setservo(self.pidevice, self._databuf['servobuf']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def servo_off(self):\n self.logger.info('Setting servo OFF')\n self.electronics.move_servo(0)\n self.config['servo']['status'] = 0",
"def poweron(self) -> None:\n self.servo_reset()",
"def servo_on(self):\n self.logger.info('Setting servo ON')\n self.electronics.move_servo(1)\n self.config['servo']['status'] = 1",
"def set_servo(self, servo: int, position: Optional[ServoPosition]) -> None:\n if servo < 0 or servo >= self._num_servos:\n raise RuntimeError(\"That servo does not exist.\")",
"def servo_off(self):\n msg = b'\\x0C\\x00'\n self.__bt.write(msg)",
"def SelectServo(self, servo):\n if servo == 'none':\n self._servo_port = None\n elif servo == 'any':\n self._servo_port = 0\n else:\n self._servo_port = int(servo)\n self._out.Notice('Servo port %s' % str(self._servo_port))",
"def servo_force(self, *args, **kwargs) -> Any:\n pass",
"def reset(self):\n if self.arduino:\n self.arduino.stop()\n\n time.sleep(.5)\n\n self.arduino = arduino.find_arduino(self.arduino_serial)\n self.arduino.start_monitor()\n\n self.driver.stop()\n self.last_control = time.time()",
"def set_servo_detach(self, servo_id=None):\r\n return self._arm.set_servo_detach(servo_id=servo_id)",
"def reset(self):\n self.stuck = False",
"def set_servo(name,servo,value):\n name = _lookup(name)\n servo_data = list(name) + [-1,-1,-1,-1]\n servo_data[servo + 1] = value\n mc.set('servo_values',servo_data)",
"def reset(self):\n self.work_state = work_state[\"Measuring\"]\n self.report_mode = report_mode[\"Initiative\"]\n self.duty_cycle = 0\n self.logger.info(\"{}: sensor resetted.\".format(self.sensor_name))",
"def reset(self):\n self.restart()\n self.cycles = 0",
"def reset_target(scope) -> None:\n if globals.cw_platform == \"CW303\" or globals.cw_platform == \"CWLITEXMEGA\":\n scope.io.pdic = 'low'\n time.sleep(0.1)\n scope.io.pdic = 'high_z' #XMEGA doesn't like pdic driven high\n time.sleep(0.1) #xmega needs more startup time\n else: \n scope.io.nrst = 'low'\n time.sleep(0.05)\n scope.io.nrst = 'high_z'\n time.sleep(0.05)",
"def reset(self):\n # print(\"Joint (reset): {}\".format(np.around(self.joints_state.position, decimals=3)))\n init_joint_pos = [1.5, -1.2, 1.4, -1.87, -1.57, 0]\n self.publisher_to_moveit_object.set_joints(init_joint_pos)\n\n # print(\">>>>>>>>>>>>>>>>>>> RESET: waiting for the movement to complete\")\n # rospy.wait_for_message(\"/pickbot/movement_complete\", Bool)\n while not self.movement_complete.data:\n pass\n # print(\">>>>>>>>>>>>>>>>>>> RESET: Waiting complete\")\n\n start_ros_time = rospy.Time.now()\n while True:\n # Check collision:\n # invalid_collision = self.get_collisions()\n # if invalid_collision:\n # print(\">>>>>>>>>> Collision: RESET <<<<<<<<<<<<<<<\")\n # observation = self.get_obs()\n # reward = UMath.compute_reward(observation, -200, True)\n # observation = self.get_obs()\n # print(\"Test Joint: {}\".format(np.around(observation[1:7], decimals=3)))\n # return U.get_state(observation), reward, True, {}\n\n elapsed_time = rospy.Time.now() - start_ros_time\n if np.isclose(init_joint_pos, self.joints_state.position, rtol=0.0, atol=0.01).all():\n break\n elif elapsed_time > rospy.Duration(2): # time out\n break\n\n self.set_target_object(random_object=self._random_object, random_position=self._random_position)\n self._check_all_systems_ready()\n\n with open('contact_1_force.yml', 'w') as yaml_file:\n yaml.dump(0.0, yaml_file, default_flow_style=False)\n with open('contact_2_force.yml', 'w') as yaml_file:\n yaml.dump(0.0, yaml_file, default_flow_style=False)\n with open('collision.yml', 'w') as yaml_file:\n yaml.dump(False, yaml_file, default_flow_style=False)\n observation = self.get_obs()\n self.object_position = observation[9:12]\n\n # print(\"Joint (after): {}\".format(np.around(observation[1:7], decimals=3)))\n\n # get maximum distance to the object to calculate reward\n self.max_distance, _ = U.get_distance_gripper_to_object()\n self.min_distace = self.max_distance\n state = U.get_state(observation)\n self._update_episode()\n return state",
"def _reset_arm(self, reset_angles):\n self._actuator_comms['UR5'].actuator_buffer.write(self._stopj_packet)\n time.sleep(0.5)\n\n self._reset_packet[1:1 + 6][self._joint_indices] = reset_angles\n self._actuator_comms['UR5'].actuator_buffer.write(self._reset_packet)\n time.sleep(max(self._reset_packet[-2] * 1.5, 2.0))",
"def reset(self):\n self.reset_dev_via_serial(self.forced_reset_timeout)",
"def reset(self):\n self.observation = None\n self.episode_done = True",
"def reset_computer(self):\n self._greediness = 7\n self._rolls = 0",
"def _reset_wheel(self):\n [j.reset_dynamic_object() for j in self.wheels]\n\n p = [[-pi / 4, 0, 0], [pi / 4, 0, pi], [-pi / 4, 0, 0], [pi / 4, 0, pi]]\n\n for i in range(self.num_wheels):\n self.joints_slipping[i].set_position([0, 0, 0],\n relative_to=self.joints[i],\n reset_dynamics=False)\n self.joints_slipping[i].set_orientation(p[i],\n relative_to=self.joints[i],\n reset_dynamics=False)\n self.wheels[i].set_position([0, 0, 0], relative_to=self.joints[i],\n reset_dynamics=False)\n self.wheels[i].set_orientation([0, 0, 0],\n relative_to=self.joints[i],\n reset_dynamics=False)",
"def reset(self):\n self.disable()\n self.shoot_again = False\n self.balls_added_live = 0",
"def onReset(self):\n # stop timer\n self.TIMER_ALIVE_AGT.stop()\n \n # cleanup remote agent\n self.resetAgent()",
"def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)",
"def reset(self):\n log.debug(\"RESET\")\n self.ref_pos_x = -1\n self.ref_pos_y = -1\n self.ref_pos_z = -1\n self.pos_x = -1\n self.pos_y = -1\n self.pos_z = -1\n self.yaw = 0\n self.throw_ongoing = False",
"def set_servo_detach(self, servo_number=None):\n if servo_number is not None:\n cmd = protocol.DETACH_SERVO.format(servo_number)\n response = self.__send_and_receive(cmd)\n if response.startswith(protocol.OK.lower()):\n return True\n else:\n return False\n else:\n if self.set_servo_detach(0) and self.set_servo_detach(1) \\\n and self.set_servo_detach(2) and self.set_servo_detach(3):\n return True\n else:\n return False",
"def reset(self):\n LOGGER.info('Resetting plugin service')\n self.servicer.reset()\n\n if self.device_proxy:\n self.device_proxy.reset()",
"def servo_make_default(self):\n self.servo_config.save_as_default_config()",
"def resetDetector (self):\n self.mpr121._reset ()",
"def stop(self):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_left.ChangeDutyCycle(0)\n self.pwm_right.ChangeDutyCycle(0)",
"def reset(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].reset()"
]
| [
"0.6844293",
"0.6448711",
"0.64227194",
"0.6132651",
"0.60031986",
"0.59832084",
"0.59482604",
"0.58659905",
"0.5832211",
"0.577215",
"0.56202066",
"0.5544754",
"0.55105555",
"0.55058527",
"0.5457077",
"0.5444339",
"0.54216516",
"0.54005915",
"0.53958654",
"0.5393592",
"0.53523433",
"0.53510374",
"0.5342201",
"0.5334959",
"0.5323299",
"0.5315874",
"0.53080225",
"0.52843964",
"0.52762264",
"0.5269587"
]
| 0.7412218 | 0 |
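resetservo() above puts back whichever servo states were recorded before referencing, preferring an explicit servostates mapping over the buffered snapshot. A hedged sketch of that capture-and-restore idea; the stub helpers and FakeAxes class are assumptions standing in for the real getservo/setservo utilities and controller:

```python
def getservo_stub(pidevice, axes):
    """Stand-in snapshot helper: returns {axis: servo_enabled}."""
    return {axis: pidevice.servo[axis] for axis in axes}


def setservo_stub(pidevice, states):
    """Stand-in restore helper: applies a {axis: bool} mapping."""
    pidevice.servo.update(states)


def reference_with_servo_restore(pidevice, do_reference):
    """Capture servo states, run the referencing step, then restore them."""
    saved = getservo_stub(pidevice, pidevice.axes)
    try:
        do_reference(pidevice)
    finally:
        # Mirrors resetservo(): restore exactly what was recorded beforehand.
        setservo_stub(pidevice, saved)


class FakeAxes:
    """Illustrative controller with two axes and a servo flag per axis."""

    def __init__(self):
        self.axes = ['1', '2']
        self.servo = {'1': True, '2': False}


dev = FakeAxes()
reference_with_servo_restore(dev, lambda d: d.servo.update({'1': False}))
assert dev.servo == {'1': True, '2': False}  # original states restored
```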
Reference unreferenced axes if according option has been provided and wait on completion. | def referencewait(self):
debug('ControllerStartup.referencewait()')
if not self.refmodes or self.prop['skipref']:
return
self._databuf['servobuf'] = getservo(self.pidevice, self.pidevice.axes)
toreference = {} # {cmd: [axes]}
for i, refmode in enumerate(self._refmodes[:self.pidevice.numaxes]):
if not refmode:
continue
axis = self.pidevice.axes[i]
refmode = refmode.upper()
if refmode not in toreference:
toreference[refmode] = []
if self._isreferenced(refmode, axis):
debug('axis %r is already referenced by %r', axis, refmode)
else:
toreference[refmode].append(self.pidevice.axes[i])
waitonaxes = []
for refmode, axes in toreference.items():
if not axes:
continue
if refmode == 'POS':
self._ref_with_pos(axes)
elif refmode == 'ATZ':
self._autozero(axes)
else:
self._ref_with_refcmd(axes, refmode)
waitonaxes += axes
waitonreferencing(self.pidevice, axes=waitonaxes, **self._kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _ref_with_refcmd(self, axes, refmode):\n debug('ControllerStartup._ref_with_refcmd(axes=%s, refmode=%s)', axes, refmode)\n for axis in axes:\n if self.pidevice.HasRON():\n try:\n self.pidevice.RON(axis, True)\n except GCSError as exc:\n if exc == gcserror.E34_PI_CNTR_CMD_NOT_ALLOWED_FOR_STAGE:\n pass # hexapod axis\n else:\n raise\n try:\n getattr(self.pidevice, refmode)(axis)\n except GCSError as exc:\n if exc == gcserror.E5_PI_CNTR_MOVE_WITHOUT_REF_OR_NO_SERVO:\n self._databuf['servobuf'][axis] = getservo(self.pidevice, axis)[axis]\n self.pidevice.SVO(axis, not self._databuf['servobuf'][axis])\n getattr(self.pidevice, refmode)(axis)\n else:\n raise\n if self.pidevice.devname in ('C-843',):\n waitonreferencing(self.pidevice, axes=axis, **self._kwargs)\n waitonready(self.pidevice)",
"def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )",
"def _doPlots(self):\n ax = self.sp.ax\n if ax: ax.helper.doPlots()\n # Setting calls now use new local options\n self.opts.newLocal()",
"def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None",
"def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Set the plot title\n title_histogram.SetTitle(self._title)\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._x_range is not None:\n x_axis.SetRangeUser(*self._x_range)\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n x_axis.SetLabelSize(self.PLOT_X_AXIS_LABEL_SIZE)\n if self._x_integer_ticks:\n x_axis.SetNdivisions(11) # hack for integer ticks \n\n # Style y-axis\n y_axis.SetTitle(self._y_title)\n y_axis.SetLabelFont(self.PLOT_ATLAS_STAMP_TEXT_FONT)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n y_axis.SetNdivisions(5,5,0)\n \n # set axis text sizes \n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n else:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE) \n y_axis.SetTitleSize(self.PLOT_Y_AXIS_TITLE_SIZE)\n y_axis.SetTitleOffset(self.PLOT_RATIO_Y_AXIS_TITLE_OFFSET)\n\n # Redraw the drawable with the new style\n drawable.Draw(option)",
"def test_link_axes(self):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n splt = SEGYPlotter(ax, self.segy)\n # should add one artist to our axes\n splt.plot_wiggles(wiggle_traces=True)\n self.assertEqual(len(splt.ACTIVE_LINES['wiggle_traces']), 1)\n self.assertTrue('wiggle_traces' not in splt.INACTIVE_LINES)\n self.assertEqual(len(ax.lines), 1)\n # should remove one artist to our axes\n splt.plot_wiggles(wiggle_traces=False)\n self.assertTrue('wiggle_traces' not in splt.ACTIVE_LINES)\n self.assertEqual(len(splt.INACTIVE_LINES['wiggle_traces']), 1)\n self.assertEqual(len(ax.lines), 0)",
"def _use_data_bounds_changed_for_axes(self):\n self.update_pipeline()",
"def on_click(event):\n ax = event.inaxes\n \n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n \n if self.current_plot == 'single':\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n self.ax_zoomed = True\n self.current_ax = ax\n ax.set_position([0.1, 0.05, 0.85, 0.80])\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n \n for axis in self.sp_fig.axes:\n if axis is not ax:\n axis.set_visible(False)\n \n except ValueError:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if self.ax_zoomed:\n self.ax_zoomed = False\n #self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n elif self.current_plot == 'multi':\n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n ant1, ant2 = ax.get_title().split(\" \")\n except:\n ant1 = int(ax.get_title().strip('Tile').strip('Antenna').strip('Stand'))\n ant2 = ant1 \n try:\n self.spin_ref_ant.setValue(int(ant1))\n self.spin_ref_ant2.setValue(int(ant2))\n self.plot_select.setCurrentIndex(0)\n self.current_plot = 'single'\n \n self.updatePlot()\n except:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if not self.ax_zoomed:\n ax.set_position([0.1, 0.1, 0.85, 0.85])\n # TODO: fix labelling of zoom plots\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n self.orig_position = ax.get_position()\n for axis in event.canvas.figure.axes:\n # Hide all the other axes...\n if axis is not ax:\n axis.set_visible(False)\n self.ax_zoomed=True\n else:\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n event.canvas.draw()",
"def waitonreferencing(pidevice, axes=None, timeout=300, predelay=0, postdelay=0, polldelay=0.1):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n axes = getaxeslist(pidevice, axes)\n if not axes:\n return\n waitonready(pidevice, timeout=timeout, predelay=predelay, polldelay=polldelay)\n maxtime = time() + timeout\n if pidevice.devname in ('C-843',):\n pidevice.errcheck = False\n while not all(list(pidevice.qFRF(axes).values())):\n if time() > maxtime:\n stopall(pidevice)\n raise SystemError('waitonreferencing() timed out after %.1f seconds' % timeout)\n sleep(polldelay)\n if pidevice.devname in ('C-843',):\n pidevice.errcheck = True\n sleep(postdelay)",
"def __exit__(self, *args):\n # Do the last (and perhaps only) call's plotting\n self._doPlots()\n self._isSubplot = False\n self.opts.goGlobal()\n if not self.usingAgg:\n self.fig.canvas.mpl_connect('resize_event', self.subplots_adjust)",
"def after_fit(self):\n plt.close(self.graph_ax.figure)",
"def on_axes_update(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update axes\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.chart_list[i].setXRange(self.worker.start_range,\n self.worker.samples_count + NUM_GUI_SAMPLES, padding=0.075)\n\n # for i, series in enumerate(self.measurements_list):\n #\n # # An optimization to prevent unnecessary rendering\n # if i == tab_open:\n #\n # # Remove old x-axis\n # series.detachAxis(self.xaxis_list[i])\n # self.chart_list[i].chart().removeAxis(self.xaxis_list[i])\n # self.xaxis_list[i] = QValueAxis()\n #\n # # Add new x-axis\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.xaxis_list[i].setRange(self.worker.samples_count, self.worker.samples_count +\n # NUM_GUI_SAMPLES)\n # series.attachAxis(self.xaxis_list[i])",
"def test_parse_axes():\n fig, ax = plt.subplots()\n\n # parse correct ax in ax\n argsref = (1, 'a', np.arange(2))\n argsax = prettypyplot.tools.parse_axes(*argsref, ax=ax)\n assert all(\n isinstance(ref, type(parse))\n for ref, parse in zip(argsref, argsax[0])\n )\n assert ax is argsax[1]\n\n # multiple axes\n with pytest.raises(ValueError):\n prettypyplot.tools.parse_axes(ax, ax=ax)\n with pytest.raises(ValueError):\n prettypyplot.tools.parse_axes(1, ax, ax=ax)\n with pytest.raises(ValueError):\n prettypyplot.tools.parse_axes(ax, ax, 1, ax=None)\n\n argsax = prettypyplot.tools.parse_axes(ax, ax=None)\n assert ax is argsax[1]\n\n argsax = prettypyplot.tools.parse_axes(ax=ax)\n assert ax is argsax[1]",
"def onRemove(self):\n # Ensure taht we can work\n plt = Plot.getPlot()\n if not plt:\n self.updateUI()\n return\n # Get again all the subwidgets (to avoid PySide Pitfalls)\n mw = self.getMainWindow()\n form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\n form.axId = self.widget(QtGui.QSpinBox, \"axesIndex\")\n\n # Don't remove first axes\n if not form.axId.value():\n msg = QtGui.QApplication.translate(\n \"plot_console\",\n \"Axes 0 can not be deleted\",\n None,\n QtGui.QApplication.UnicodeUTF8)\n App.Console.PrintError(msg + \"\\n\")\n return\n # Remove axes\n ax = plt.axes\n ax.set_axis_off()\n plt.axesList.pop(form.axId.value())\n # Ensure that active axes is correct\n index = min(form.axId.value(), len(plt.axesList) - 1)\n form.axId.setValue(index)\n plt.update()",
"def effect(self):\n AxisType = self.options.AxisType\n AxisDescription = self.options.AxisDescription\n AxisUnit = self.options.AxisUnit\n AxisLabel = self.options.AxisLabel\n AxisMaxValue = self.options.AxisMaxValue\n AxisMinValue = self.options.AxisMinValue\n AxisScale = self.options.AxisScale\n \n \n for id, node in self.selected.iteritems():\n axis = node #TODO: This selection should be further tested\n axis.set(inkex.addNS(\"Type\",\"TimeAnalysis\"), \"Axis\")\n axis.set(inkex.addNS(\"AxisType\",\"TimeAnalysis\"), AxisType)\n axis.set(inkex.addNS(\"AxisDescription\",\"TimeAnalysis\"), AxisDescription)\n #TODO: The label should be unique.\n axis.set(inkex.addNS(\"AxisLabel\",\"TimeAnalysis\"), AxisLabel) \n axis.set(inkex.addNS(\"AxisUnit\",\"TimeAnalysis\"), AxisUnit)\n axis.set(inkex.addNS(\"AxisMaxValue\",\"TimeAnalysis\"), AxisMaxValue)\n axis.set(inkex.addNS(\"AxisMinValue\",\"TimeAnalysis\"), AxisMinValue)\n axis.set(inkex.addNS(\"AxisScale\",\"TimeAnalysis\"), AxisScale)\n # sys.stderr.write(\"The max value of the axis is: \" + str(axis.get(inkex.addNS(\"AxisMaxValue\",\"TimeAnalysis\"))))",
"def clear_axes_selection(self):\n self.x_axis = ''\n self.y_axis = ''\n self.non_numeric_x_axis = False\n self.count_desired = False\n self.header_choices('x')",
"def reorderAxesEvent(self):\n axisB = self.sender().text()\n self.myParent.swapAxes(self.axisName, axisB)\n self.myParent.setVistrailsVariableAxes()",
"def run(self):\n # fill the x_values,y_values,z_values dictionaries\n if not self.__fillCoordinatesFromSource():\n self.raiseAWarning('Nothing to Plot Yet. Returning.')\n return\n\n self.counter += 1\n if self.counter > 1:\n self.actcm = None\n clusterDict = deepcopy(self.outStreamTypes)\n\n # start plotting.... loop over the plots that need to be included in this figure\n for pltIndex in range(len(self.outStreamTypes)):\n plotSettings = self.options['plotSettings']['plot'][pltIndex]\n if 'gridLocation' in plotSettings:\n x = None\n y = None\n if 'x' in plotSettings['gridLocation']:\n x = list(map(int, plotSettings['gridLocation']['x'].strip().split(' ')))\n else:\n x = None\n if 'y' in plotSettings['gridLocation'].keys():\n y = list(map(int, plotSettings['gridLocation']['y'].strip().split(' ')))\n else:\n y = None\n if pltIndex == 0:\n self.ax.remove() # remove axis so that there is not an extra axis on plot with subplots\n if (len(x) == 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]], projection='3d')\n elif (len(x) == 1 and len(y) != 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0], y[0]:y[-1]], projection='3d')\n elif (len(x) != 1 and len(y) == 1):\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]], projection='3d')\n else:\n if self.dim == 2:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]])\n else:\n self.ax = self.fig.add_subplot(self.gridSpace[x[0]:x[-1], y[0]:y[-1]], projection='3d')\n\n if 'gridSpace' in self.options['plotSettings']:\n self.ax.locator_params(axis='y', nbins=4)\n self.ax.locator_params(axis='x', nbins=2)\n if 'range' in plotSettings:\n axes_range = plotSettings['range']\n if 'ymin' in axes_range:\n self.ax.set_ylim(bottom=ast.literal_eval(axes_range['ymin']))\n if 'ymax' in axes_range:\n self.ax.set_ylim(top=ast.literal_eval(axes_range['ymax']))\n if 'xmin' in axes_range:\n self.ax.set_xlim(left=ast.literal_eval(axes_range['xmin']))\n if 'xmax' in axes_range:\n self.ax.set_xlim(right=ast.literal_eval(axes_range['xmax']))\n if self.dim == 3:\n if 'zmin' in axes_range.options['plotSettings']['plot'][pltIndex]:\n if 'zmax' not in axes_range.options['plotSettings']:\n self.raiseAWarning('zmin inputted but not zmax. zmin ignored! ')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(self.options['plotSettings']['zmax']))\n if 'zmax' in axes_range:\n if 'zmin' not in axes_range:\n self.raiseAWarning('zmax inputted but not zmin. zmax ignored! 
')\n else:\n self.ax.set_zlim(bottom=ast.literal_eval(axes_range['zmin']), top=ast.literal_eval(axes_range['zmax']))\n if 'xlabel' not in plotSettings:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(plotSettings['xlabel'])\n if 'ylabel' not in plotSettings:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(plotSettings['ylabel'])\n if 'zlabel' in plotSettings:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(plotSettings['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n else:\n if 'xlabel' not in self.options['plotSettings']:\n self.ax.set_xlabel('x')\n else:\n self.ax.set_xlabel(self.options['plotSettings']['xlabel'])\n if 'ylabel' not in self.options['plotSettings']:\n self.ax.set_ylabel('y')\n else:\n self.ax.set_ylabel(self.options['plotSettings']['ylabel'])\n if 'zlabel' in self.options['plotSettings']:\n if self.dim == 2:\n self.raiseAWarning('zlabel keyword does not make sense in 2-D Plots!')\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel(self.options['plotSettings']['zlabel'])\n elif self.dim == 3 and self.zCoordinates:\n self.ax.set_zlabel('z')\n\n if 'legend' in self.options['plotSettings']:\n if 'label' not in plotSettings.get('attributes', {}):\n if 'attributes' not in plotSettings:\n plotSettings['attributes'] = {}\n plotSettings['attributes']['label'] = self.outStreamTypes[pltIndex] + ' ' + str(pltIndex)\n #################\n # SCATTER PLOT #\n #################\n self.raiseADebug(f'creating plot {self.name}')\n if self.outStreamTypes[pltIndex] == 'scatter':\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n if self.colorMapCoordinates[pltIndex] is not None:\n # Find the max and min colormap values\n firstKey = utils.first(self.xValues[pltIndex].keys())\n vmin = np.amin(self.colorMapValues[pltIndex][firstKey])\n vmax = np.amax(self.colorMapValues[pltIndex][firstKey])\n for key in self.xValues[pltIndex]:\n vmin = min(vmin,np.amin(self.colorMapValues[pltIndex][key]))\n vmax = max(vmax,np.amax(self.colorMapValues[pltIndex][key]))\n plotSettings['norm'] = matplotlib.colors.Normalize(vmin,vmax)\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n scatterPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['norm'] = plotSettings['norm']\n scatterPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][xIndex]\n scatterPlotOptions['cmap'] = matplotlib.cm.get_cmap(\"winter\")\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if 
first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n try:\n self.actcm.draw_all()\n # this is not good, what exception will be thrown?\n except:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m, ax=self.ax)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n **scatterPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n scatterPlotOptions['c'] = self.colorMapValues[pltIndex][key][zIndex]\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n scatterPlotOptions['cmap'] = plotSettings['cmap']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if 'color' not in scatterPlotOptions:\n scatterPlotOptions['c'] = plotSettings['c']\n self.actPlot = self.ax.scatter(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], 
self.zValues[pltIndex][key][zIndex], **scatterPlotOptions)\n #################\n # LINE PLOT #\n #################\n elif self.outStreamTypes[pltIndex] == 'line':\n minV = 0\n maxV = 0\n # If the user does not define an appropriate cmap, then use matplotlib's default.\n if 'cmap' not in plotSettings or plotSettings['cmap'] not in matplotlib.cm.datad:\n plotSettings['cmap'] = None\n if bool(self.colorMapValues):\n for key in self.xValues[pltIndex]:\n minV = min(minV,self.colorMapValues[pltIndex][key][-1][-1])\n maxV = max(maxV,self.colorMapValues[pltIndex][key][-1][-1])\n cmap = matplotlib.cm.ScalarMappable(matplotlib.colors.Normalize(minV, maxV, True), plotSettings['cmap'])\n cmap.set_array([minV,maxV])\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.colorMapCoordinates[pltIndex] is not None:\n plotSettings['interpPointsX'] = str(max(200, len(self.xValues[pltIndex][key][xIndex])))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n if self.yValues[pltIndex][key][yIndex].size < 2:\n return\n xi, yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings, returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(xi, yi, c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(xi, yi, **plotSettings.get('attributes', {}))\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n c=cmap.cmap(self.colorMapValues[pltIndex][key][-1][-1]/(maxV-minV)))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if self.actcm is None:\n self.actcm = self.fig.colorbar(cmap)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.actcm.draw_all()\n else:\n self.actPlot = self.ax.plot(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n self.zValues[pltIndex][key][zIndex],\n **plotSettings.get('attributes', {}))\n ##################\n # HISTOGRAM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'histogram':\n if 'bins' not in plotSettings:\n if self.dim == 2:\n plotSettings['bins'] = '10'\n else:\n plotSettings['bins'] = '4'\n if 'normed' not in plotSettings:\n plotSettings['normed'] = 'False'\n if 'weights' not in plotSettings:\n plotSettings['weights'] = 'None'\n if 'cumulative' not in plotSettings:\n plotSettings['cumulative'] = 'False'\n if 'histtype' not in plotSettings:\n plotSettings['histtype'] = 'bar'\n if 'align' not in plotSettings:\n plotSettings['align'] = 'mid'\n if 'orientation' not in plotSettings:\n plotSettings['orientation'] = 'vertical'\n if 'rwidth' not in plotSettings:\n plotSettings['rwidth'] = 'None'\n if 'log' not in plotSettings:\n plotSettings['log'] = 'None'\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'stacked' not in plotSettings:\n plotSettings['stacked'] = 'None'\n if 
self.sourceData[0].type.strip() == 'HistorySet':\n #####################################################################################################################################\n # @MANDD: This 'if' condition has been added in order to allow the user the correctly create an histogram out of an historySet #\n # If the histogram is created out of the input variables, then the plot has an identical meaning of the one generated by a pointSet #\n # However, if the histogram is created out of the output variables, then the plot consider only the last value of the array #\n #####################################################################################################################################\n data = {}\n data['x'] = np.empty(0)\n data['y'] = np.empty(0)\n for index in range(len(self.outStreamTypes)):\n for key in self.xValues[index]:\n data['x'] = np.append(data['x'], self.xValues[index][key][0][-1])\n if self.dim == 3:\n data['y'] = np.append(data['y'], self.yValues[index][key][0][-1])\n del self.xValues[index]\n self.xValues = {}\n self.xValues[index] = {}\n self.xValues[index][0] = []\n self.xValues[index][0].append(deepcopy(data['x']))\n if self.dim == 3:\n del self.yValues[index]\n self.yValues = {}\n self.yValues[index] ={ }\n self.yValues[index][0] = []\n self.yValues[index][0].append(deepcopy(data['y']))\n\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n try:\n colorss = ast.literal_eval(plotSettings['color'])\n # unknown what specific error is anticipated here, but I don't like a bare except...\n # ast.literal_eval can raise the exceptions listed below (see library docs):\n except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):\n colorss = plotSettings['color']\n if self.dim == 2:\n self.ax.hist(self.xValues[pltIndex][key][xIndex],\n bins=ast.literal_eval(plotSettings['bins']),\n density=ast.literal_eval(plotSettings['normed']),\n weights=ast.literal_eval(plotSettings['weights']),\n cumulative=ast.literal_eval(plotSettings['cumulative']),\n histtype=plotSettings['histtype'],\n align=plotSettings['align'],\n orientation=plotSettings['orientation'],\n rwidth=ast.literal_eval(plotSettings['rwidth']),\n log=ast.literal_eval(plotSettings['log']),\n color=colorss,\n stacked=ast.literal_eval(plotSettings['stacked']),\n **plotSettings.get('attributes', {}))\n else:\n for yIndex in range(len(self.yValues[pltIndex][key])):\n hist, xedges, yedges = np.histogram2d(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n bins=ast.literal_eval(plotSettings['bins']))\n elements = (len(xedges) - 1) * (len(yedges) - 1)\n if 'x_offset' in plotSettings:\n xoffset = float(plotSettings['x_offset'])\n else:\n xoffset = 0.25\n if 'y_offset' in plotSettings:\n yoffset = float(plotSettings['y_offset'])\n else:\n yoffset = 0.25\n if 'dx' in plotSettings:\n dxs = float(plotSettings['dx'])\n else:\n dxs = (self.xValues[pltIndex][key][xIndex].max() - self.xValues[pltIndex][key][xIndex].min()) / float(plotSettings['bins'])\n if 'dy' in plotSettings:\n dys = float(plotSettings['dy'])\n else:\n dys = (self.yValues[pltIndex][key][yIndex].max() - self.yValues[pltIndex][key][yIndex].min()) / float(plotSettings['bins'])\n xpos, ypos = np.meshgrid(xedges[:-1] + xoffset, yedges[:-1] + yoffset)\n self.actPlot = self.ax.bar3d(xpos.flatten(),\n ypos.flatten(),\n np.zeros(elements),\n dxs*np.ones_like(elements),\n dys*np.ones_like(elements),\n hist.flatten(),\n color=colorss,\n zsort='average',\n 
**plotSettings.get('attributes', {}))\n ##################\n # STEM PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'stem':\n if 'linefmt' not in plotSettings:\n plotSettings['linefmt'] = 'b-'\n if 'markerfmt' not in plotSettings:\n plotSettings['markerfmt'] = 'bo'\n if 'basefmt' not in plotSettings:\n plotSettings['basefmt'] = 'r-'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.dim == 2:\n self.actPlot = self.ax.stem(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n linefmt=plotSettings['linefmt'],\n markerfmt=plotSettings['markerfmt'],\n basefmt = plotSettings['linefmt'],\n use_line_collection=True,\n **plotSettings.get('attributes', {}))\n else:\n # it is a basic stem plot constructed using a standard line plot. For now we do not use the previous defined keywords...\n for zIndex in range(len(self.zValues[pltIndex][key])):\n for xx, yy, zz in zip(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], self.zValues[pltIndex][key][zIndex]):\n self.ax.plot([xx, xx], [yy, yy], [0, zz], '-')\n ##################\n # STEP PLOT #\n ##################\n elif self.outStreamTypes[pltIndex] == 'step':\n if self.dim == 2:\n if 'where' not in plotSettings:\n plotSettings['where'] = 'mid'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n if self.xValues[pltIndex][key][xIndex].size < 2:\n xi = self.xValues[pltIndex][key][xIndex]\n else:\n xi = np.linspace(self.xValues[pltIndex][key][xIndex].min(), self.xValues[pltIndex][key][xIndex].max(), ast.literal_eval(plotSettings['interpPointsX']))\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if self.yValues[pltIndex][key][yIndex].size <= 3:\n return\n yi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex], self.yValues[pltIndex][key][yIndex], plotSettings)\n self.actPlot = self.ax.step(xi, yi, where=plotSettings['where'], **plotSettings.get('attributes', {}))\n else:\n self.raiseAWarning('step Plot not available in 3D')\n return\n ########################\n # PSEUDOCOLOR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'pseudocolor':\n if self.dim == 2:\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n if not self.colorMapCoordinates:\n self.raiseAMessage('pseudocolor Plot needs coordinates for color map... 
Returning without plotting')\n return\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.colorMapValues[pltIndex][key][zIndex].size <= 3:\n return\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n else:\n self.actPlot = self.ax.pcolormesh(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(ma.masked_where(np.isnan(Ci), Ci))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n actcm = self.fig.colorbar(m)\n actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n self.raiseAWarning('pseudocolor Plot is considered a 2D plot, not a 3D!')\n return\n ########################\n # SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'surface':\n if self.dim == 2:\n self.raiseAWarning('surface Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n if 'antialiased' not in plotSettings:\n plotSettings['antialiased'] = 'False'\n if 'linewidth' not in plotSettings:\n plotSettings['linewidth'] = '0'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n facecolors=matplotlib.cm.get_cmap(name=plotSettings['cmap'])(ma.masked_where(np.isnan(Ci), Ci)),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n linewidth=ast.literal_eval(plotSettings['linewidth']),\n antialiased=ast.literal_eval(plotSettings['antialiased']),\n **plotSettings.get('attributes', {}))\n ########################\n # TRI-SURFACE PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'tri-surface':\n if self.dim == 2:\n self.raiseAWarning('TRI-surface Plot is NOT available for 2D plots, it is 3D!')\n return\n else:\n if 'color' not in plotSettings:\n plotSettings['color'] = 'b'\n if 'shade' not in plotSettings:\n plotSettings['shade'] = 'False'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, 
so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n metric = (self.xValues[pltIndex][key][xIndex] ** 2 + self.yValues[pltIndex][key][yIndex] ** 2) ** 0.5\n metricIndeces = np.argsort(metric)\n xs = np.zeros(self.xValues[pltIndex][key][xIndex].shape)\n ys = np.zeros(self.yValues[pltIndex][key][yIndex].shape)\n zs = np.zeros(self.zValues[pltIndex][key][zIndex].shape)\n for sindex in range(len(metricIndeces)):\n xs[sindex] = self.xValues[pltIndex][key][xIndex][metricIndeces[sindex]]\n ys[sindex] = self.yValues[pltIndex][key][yIndex][metricIndeces[sindex]]\n zs[sindex] = self.zValues[pltIndex][key][zIndex][metricIndeces[sindex]]\n surfacePlotOptions = {'color': plotSettings['color'],\n 'shade': ast.literal_eval(plotSettings['shade'])}\n surfacePlotOptions.update(plotSettings.get('attributes', {}))\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n surfacePlotOptions['cmap'] = matplotlib.cm.get_cmap(name = plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actPlot.cmap = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] != 'None':\n surfacePlotOptions[\"cmap\"] = matplotlib.cm.get_cmap(name=plotSettings['cmap'])\n self.actPlot = self.ax.plot_trisurf(xs, ys, zs, **surfacePlotOptions)\n ########################\n # WIREFRAME PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'wireframe':\n if self.dim == 2:\n self.raiseAWarning('wireframe Plot is NOT available for 2D plots, IT IS A 3D!')\n return\n else:\n if 'rstride' not in plotSettings:\n plotSettings['rstride'] = '1'\n if 'cstride' not in plotSettings:\n plotSettings['cstride'] = '1'\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][zIndex].size <= 3:\n return\n if self.colorMapCoordinates[pltIndex] is not None:\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n xig, yig, zi = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.zValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning(f'Currently, ax.plot_wireframe() in MatPlotLib version: {matplotlib.__version__} does not support a colormap! Wireframe plotted on a surface plot...')\n if self.actcm:\n first = False\n else:\n first = True\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name = plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n self.actPlot = self.ax.plot_surface(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n alpha=0.4,\n rstride=ast.literal_eval(plotSettings['rstride']),\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_array(self.colorMapValues[pltIndex][key])\n self.actcm = self.fig.colorbar(m)\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap=self.actPlot.cmap, norm=self.actPlot.norm)\n m.set_clim(vmin=min(self.colorMapValues[pltIndex][key][-1]), vmax=max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n if plotSettings['cmap'] == 'None':\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n if 'color' in plotSettings.get('attributes', {}):\n self.actPlot.set_color = plotSettings.get('attributes', {})['color']\n else:\n self.actPlot.set_color = 'blue'\n else:\n self.actPlot = self.ax.plot_wireframe(xig,\n yig,\n ma.masked_where(np.isnan(zi), zi),\n rstride=ast.literal_eval(plotSettings['rstride']),\n cstride=ast.literal_eval(plotSettings['cstride']),\n **plotSettings.get('attributes', {}))\n ########################\n # CONTOUR PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'contour' or self.outStreamTypes[pltIndex] == 'filledContour':\n if self.dim == 2:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n for key in self.xValues[pltIndex]:\n if not self.colorMapCoordinates:\n self.raiseAWarning(self.outStreamTypes[pltIndex] + ' Plot needs coordinates for color map... 
Returning without plotting')\n return\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink=0.8, extend='both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n else:\n self.raiseAWarning('contour/filledContour is a 2-D plot, where x,y are the surface coordinates and colorMap vector is the array to visualize!\\n contour3D/filledContour3D are 3-D! ')\n return\n # These should be combined: ^^^ & vvv\n elif self.outStreamTypes[pltIndex] == 'contour3D' or self.outStreamTypes[pltIndex] == 'filledContour3D':\n if self.dim == 2:\n self.raiseAWarning('contour3D/filledContour3D Plot is NOT available for 2D plots, IT IS A 2D! Check \"contour/filledContour\"!')\n return\n else:\n if 'numberBins' in plotSettings:\n nbins = int(plotSettings['numberBins'])\n else:\n nbins = 5\n if 'extend3D' in plotSettings:\n ext3D = bool(plotSettings['extend3D'])\n else:\n ext3D = False\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n # Hopefully, x,y, and z are all the same length, so checking this\n # here should be good enough.\n # The problem is you cannot interpolate any amount of space if\n # you only have a single data point.\n if self.xValues[pltIndex][key][xIndex].size == 1:\n self.raiseAWarning('Nothing to Plot Yet. 
Continuing to next plot.')\n continue\n for yIndex in range(len(self.yValues[pltIndex][key])):\n for zIndex in range(len(self.colorMapValues[pltIndex][key])):\n if self.actcm:\n first = False\n else:\n first = True\n xig, yig, Ci = mathUtils.interpolateFunction(self.xValues[pltIndex][key][xIndex],\n self.yValues[pltIndex][key][yIndex],\n plotSettings,\n z=self.colorMapValues[pltIndex][key][zIndex],\n returnCoordinate=True)\n if self.outStreamTypes[pltIndex] == 'contour3D':\n if plotSettings['cmap'] == 'None':\n if 'color' in plotSettings.get('attributes', {}):\n color = plotSettings.get('attributes', {})['color']\n else:\n color = 'blue'\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n colors=color,\n extend3d=ext3D,\n **plotSettings.get('attributes', {}))\n else:\n self.actPlot = self.ax.contour3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n extend3d=ext3D,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n else:\n if plotSettings['cmap'] == 'None':\n plotSettings['cmap'] = 'jet'\n self.actPlot = self.ax.contourf3D(xig,\n yig,\n ma.masked_where(np.isnan(Ci), Ci),\n nbins,\n cmap=matplotlib.cm.get_cmap(name=plotSettings['cmap']),\n **plotSettings.get('attributes', {}))\n self.ax.clabel(self.actPlot, inline=1, fontsize=10)\n if 'colorbar' not in self.options or self.options['colorbar']['colorbar'] != 'off':\n if first:\n self.actcm = self.fig.colorbar(self.actPlot, shrink = 0.8, extend = 'both')\n self.actcm.set_label(self.colorMapCoordinates[pltIndex][0].split('|')[-1].replace(')', ''))\n else:\n m = matplotlib.cm.ScalarMappable(cmap = self.actPlot.cmap, norm = self.actPlot.norm)\n m.set_clim(vmin = min(self.colorMapValues[pltIndex][key][-1]), vmax = max(self.colorMapValues[pltIndex][key][-1]))\n self.actcm.draw_all()\n ########################\n # DataMining PLOT #\n ########################\n elif self.outStreamTypes[pltIndex] == 'dataMining':\n colors = cycle(['#88CCEE', '#DDCC77', '#AA4499', '#117733', '#332288', '#999933', '#44AA99', '#882255', '#CC6677', '#CD6677', '#DC6877', '#886677', '#AA6677', '#556677', '#CD7865'])\n if 's' not in plotSettings:\n plotSettings['s'] = '20'\n if 'c' not in plotSettings:\n plotSettings['c'] = 'b'\n if 'marker' not in plotSettings:\n plotSettings['marker'] = 'o'\n if 'alpha' not in plotSettings:\n plotSettings['alpha'] = 'None'\n if 'linewidths' not in plotSettings:\n plotSettings['linewidths'] = 'None'\n clusterDict[pltIndex] = {}\n for key in self.xValues[pltIndex]:\n for xIndex in range(len(self.xValues[pltIndex][key])):\n for yIndex in range(len(self.yValues[pltIndex][key])):\n dataMiningPlotOptions = {'s': ast.literal_eval(plotSettings['s']),\n 'marker': (plotSettings['marker']),\n 'alpha': ast.literal_eval(plotSettings['alpha']),\n 'linewidths': ast.literal_eval(plotSettings['linewidths'])}\n if self.colorMapCoordinates[pltIndex] is not None:\n self.raiseAWarning('ColorMap values supplied, however DataMining plots do not use colorMap from input.')\n if plotSettings['cmap'] == 'None':\n self.raiseAWarning('ColorSet supplied, however DataMining plots do not use color set from input.')\n if 'cluster' == plotSettings['SKLtype']:\n # TODO: include the cluster Centers to the plot\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = 
np.amax(self.clusterValues[pltIndex][1][0]) + 1\n dataMiningPlotOptions.update(plotSettings.get('attributes', {}))\n if self.dim == 2:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n clusterDict[pltIndex]['clusterValues'] = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n clusterDict[pltIndex]['clusterValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['clusterValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if self.dim == 2:\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n clusterDict[pltIndex]['clusterValues'][:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(int(clusterDict[pltIndex]['noClusters'])), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n\n # Handle all of the outlying data\n myMembers = self.clusterValues[pltIndex][1][0] == -1\n # resize the points\n dataMiningPlotOptions['s'] /= 2\n # and hollow out their markers\n if 'facecolors' in dataMiningPlotOptions:\n faceColors = dataMiningPlotOptions['facecolors']\n else:\n faceColors = None\n dataMiningPlotOptions['facecolors'] = 'none'\n\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['clusterValues'][myMembers, 0],\n clusterDict[pltIndex]['clusterValues'][myMembers, 1],\n clusterDict[pltIndex]['clusterValues'][myMembers, 2],\n color='#000000',\n **dataMiningPlotOptions)\n\n # Restore the plot options to their original values\n dataMiningPlotOptions['s'] *= 2\n if faceColors is not None:\n dataMiningPlotOptions['facecolors'] = faceColors\n else:\n del dataMiningPlotOptions['facecolors']\n\n elif 'bicluster' == plotSettings['SKLtype']:\n self.raiseAnError(IOError, 'SKLType Bi-Cluster Plots are not implemented yet!..')\n elif 'mixture' == plotSettings['SKLtype']:\n if 'noMixtures' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noMixtures'] = int(plotSettings.get('attributes', {})['noMixtures'])\n plotSettings.get('attributes', {}).pop('noMixtures')\n else:\n clusterDict[pltIndex]['noMixtures'] = np.amax(self.mixtureValues[pltIndex][1][0]) + 1\n if self.dim == 3:\n self.raiseAnError(IOError, 'SKLType Mixture Plots are only available in 2-Dimensions')\n 
else:\n clusterDict[pltIndex]['mixtureValues'] = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n clusterDict[pltIndex]['mixtureValues'][:, 0] = self.xValues[pltIndex][key][xIndex]\n clusterDict[pltIndex]['mixtureValues'][:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'mixtureCovars' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureCovars', (pltIndex, 0))\n # mixtureCovars = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureCovars')\n # else:\n # mixtureCovars = None\n if 'mixtureMeans' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('mixtureMeans', (pltIndex, 0))\n # mixtureMeans = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('mixtureMeans')\n # else:\n # mixtureMeans = None\n # mixtureCovars.reshape(3, 4)\n # mixtureMeans.reshape(3, 4)\n # for i, (mean, covar, col) in enumerate(zip(mixtureMeans, mixtureCovars, colors)):\n for i, col in zip(range(clusterDict[pltIndex]['noMixtures']), colors):\n if not np.any(self.mixtureValues[pltIndex][1][0] == i):\n continue\n myMembers = self.mixtureValues[pltIndex][1][0] == i\n self.actPlot = self.ax.scatter(clusterDict[pltIndex]['mixtureValues'][myMembers, 0],\n clusterDict[pltIndex]['mixtureValues'][myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n elif 'manifold' == plotSettings['SKLtype']:\n if self.dim == 2:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n manifoldValues = np.zeros(shape=(len(self.xValues[pltIndex][key][xIndex]), 3))\n manifoldValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n manifoldValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'clusterLabels' in plotSettings.get('attributes', {}):\n split = self.__splitVariableNames('clusterLabels', (pltIndex, 0))\n clusterDict[pltIndex]['clusterLabels'] = self.sourceData[pltIndex].getParam(split[1], split[2], nodeId = 'ending')\n plotSettings.get('attributes', {}).pop('clusterLabels')\n else:\n clusterDict[pltIndex]['clusterLabels'] = None\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(manifoldValues[myMembers, 0],\n manifoldValues[myMembers, 1],\n manifoldValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n if self.dim == 2:\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n manifoldValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(manifoldValues[:, 0],\n manifoldValues[:, 1],\n 
manifoldValues[:, 2],\n **dataMiningPlotOptions)\n elif 'decomposition' == plotSettings['SKLtype']:\n if self.dim == 2:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 2))\n else:\n decompositionValues = np.zeros(shape = (len(self.xValues[pltIndex][key][xIndex]), 3))\n decompositionValues[:, 0] = self.xValues[pltIndex][key][xIndex]\n decompositionValues[:, 1] = self.yValues[pltIndex][key][yIndex]\n if 'noClusters' in plotSettings.get('attributes', {}):\n clusterDict[pltIndex]['noClusters'] = int(plotSettings.get('attributes', {})['noClusters'])\n plotSettings.get('attributes', {}).pop('noClusters')\n else:\n clusterDict[pltIndex]['noClusters'] = np.amax(self.clusterValues[pltIndex][1][0]) + 1\n if self.clusterValues[pltIndex][1][0] is not None:\n if self.dim == 2:\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n color=col,\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n for k, col in zip(range(clusterDict[pltIndex]['noClusters']), colors):\n myMembers = self.clusterValues[pltIndex][1][0] == k\n self.actPlot = self.ax.scatter(decompositionValues[myMembers, 0],\n decompositionValues[myMembers, 1],\n decompositionValues[myMembers, 2],\n color=col,\n **dataMiningPlotOptions)\n else:\n # no ClusterLabels\n if self.dim == 2:\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n **dataMiningPlotOptions)\n else:\n for zIndex in range(len(self.zValues[pltIndex][key])):\n decompositionValues[:, 2] = self.zValues[pltIndex][key][zIndex]\n self.actPlot = self.ax.scatter(decompositionValues[:, 0],\n decompositionValues[:, 1],\n decompositionValues[:, 2],\n **dataMiningPlotOptions)\n else:\n # Let's try to \"write\" the code for the plot on the fly\n self.raiseAWarning('Trying to create a non-predefined plot of type ' + self.outStreamTypes[pltIndex] + '. If this fails, please refer to the and/or the related matplotlib method specification.')\n kwargs = {}\n for kk in plotSettings:\n if kk != 'attributes' and kk != self.outStreamTypes[pltIndex]:\n try:\n kwargs[kk] = ast.literal_eval(plotSettings[kk])\n except ValueError:\n kwargs[kk] = plotSettings[kk]\n try:\n if self.dim == 2:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n else:\n customFunctionCall = getattr(self.ax, self.outStreamTypes[pltIndex])\n self.actPlot = customFunctionCall(**kwargs)\n except AttributeError as ae:\n self.raiseAnError(RuntimeError, '<' + str(ae) + '> -> in execution custom plot \"' + self.outStreamTypes[pltIndex] + '\" in Plot ' + self.name + '.\\nSTREAM MANAGER: ERROR -> command has been called in the following way: ' + 'ax.' 
+ self.outStreamTypes[pltIndex])\n\n if 'legend' in self.options['plotSettings']:\n self.fig.legend(**self.options['plotSettings']['legend'])\n\n # SHOW THE PICTURE\n self.__executeActions()\n self.fig.canvas.draw_idle()\n\n if 'screen' in self.destinations and display:\n def handle_close(event):\n \"\"\"\n This method is aimed to handle the closing of figures (overall when in interactive mode)\n @ In, event, instance, the event to close\n @ Out, None\n \"\"\"\n self.fig.canvas.stop_event_loop()\n self.raiseAMessage('Closed Figure')\n self.fig.canvas.mpl_connect('close_event', handle_close)\n # self.plt.pause(1e-6)\n # The following code is extracted from pyplot.pause without actually\n # needing to force the code to sleep, according to MPL's documentation,\n # this feature is experimental, hopefully by not calling the pause\n # function, we can obtain consistent results.\n # We are skipping a few of the sanity checks done in that function,\n # since we are sure we have an interactive backend and access to the\n # correct type of canvas and figure.\n self.fig.canvas.draw()\n # If your graphs are unresponsive to user input, you may want to consider\n # adjusting this timeout, to allow more time for the input to be handled.\n self.fig.canvas.start_event_loop(1e-3)\n\n # self.fig.canvas.flush_events()\n\n for fileType in self.destinations:\n if fileType == 'screen':\n continue\n\n if not self.overwrite:\n prefix = str(self.counter) + '-'\n else:\n prefix = ''\n\n if len(self.filename) > 0:\n name = self.filename\n else:\n name = prefix + self.name + '_' + str(self.outStreamTypes).replace(\"'\", \"\").replace(\"[\", \"\").replace(\"]\", \"\").replace(\",\", \"-\").replace(\" \", \"\")\n\n if self.subDirectory is not None:\n name = os.path.join(self.subDirectory,name)\n\n self.fig.savefig(name + '.' + fileType, format=fileType)\n\n if 'screen' not in self.destinations:\n plt.close(fig=self.fig)\n\n gc.collect()",
"def onAxesId(self, value):\n if not self.skip:\n self.skip = True\n # No active plot case\n plt = Plot.getPlot()\n if not plt:\n self.updateUI()\n self.skip = False\n return\n # Get again all the subwidgets (to avoid PySide Pitfalls)\n mw = self.getMainWindow()\n form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\n form.axId = self.widget(QtGui.QSpinBox, \"axesIndex\")\n\n form.axId.setMaximum(len(plt.axesList))\n if form.axId.value() >= len(plt.axesList):\n form.axId.setValue(len(plt.axesList) - 1)\n # Send new control to Plot instance\n plt.setActiveAxes(form.axId.value())\n self.updateUI()\n self.skip = False",
"def test_get_axes():\n fig, axs = plt.subplots()\n assert all(\n isinstance(ax, mpl.axes.Axes)\n for ax in prettypyplot.tools.get_axes(axs)\n )\n assert all(\n isinstance(ax, mpl.axes.Axes)\n for ax in prettypyplot.tools.get_axes(None)\n )\n with pytest.raises(TypeError):\n prettypyplot.tools.get_axes(fig)",
"def _go_to_axes(self, session, el=None, az=None, third=None):\n move_defs = []\n for axis_name, short_name, target in [\n ('Azimuth', 'az', az),\n ('Elevation', 'el', el),\n ('Boresight', 'third', third),\n ]:\n if target is not None:\n move_defs.append(\n (short_name, self._go_to_axis(session, axis_name, target)))\n if len(move_defs) is None:\n return True, 'No motion requested.'\n\n moves = yield DeferredList([d for n, d in move_defs])\n all_ok, msgs = True, []\n for _ok, result in moves:\n if _ok:\n all_ok = all_ok and result[0]\n msgs.append(result[1])\n else:\n all_ok = False\n msgs.append(f'Crash! {result}')\n\n if all_ok:\n msg = msgs[0]\n else:\n msg = ' '.join([f'{n}: {msg}' for (n, d), msg in zip(move_defs, msgs)])\n return all_ok, msg",
"def setfig(fig=None,**kwargs):\n if fig:\n plt.figure(fig,**kwargs)\n plt.clf()\n elif fig==0:\n pass\n else:\n plt.figure(**kwargs)",
"def plot_clear():\n plt.cla()",
"def _lazy_axis(self):\n raise NotImplementedError",
"def waitontarget(pidevice, axes=None, timeout=300, predelay=0, postdelay=0, polldelay=0.1):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n axes = getaxeslist(pidevice, axes)\n if not axes:\n return\n waitonready(pidevice, timeout=timeout, predelay=predelay, polldelay=polldelay)\n if not pidevice.HasqONT():\n return\n servo = getservo(pidevice, axes)\n axes = [x for x in axes if servo[x]]\n maxtime = time() + timeout\n while not all(list(pidevice.qONT(axes).values())):\n if time() > maxtime:\n raise SystemError('waitontarget() timed out after %.1f seconds' % timeout)\n sleep(polldelay)\n sleep(postdelay)",
"def adjust_axes(axes):\n # TODO: Uncomment & decide for each subplot!\n for ax in axes.itervalues():\n core.hide_axis(ax)\n\n for k in [\n \"placeholder\",\n \"placeholder1\",\n \"placeholder2\",\n \"spikes_stim\",\n \"spikes_stim1\",\n \"spikes_stim2\",\n \"spikes_post\",\n \"stimulation_schema\"\n ]:\n axes[k].set_frame_on(False)",
"def setaxesnames(self):\n if not self._axesnames or self.prop['skipsai']:\n return\n debug('ControllerStartup.setaxesnames()')\n oldaxes = self.pidevice.qSAI_ALL()\n for i, newaxis in enumerate(self.axesnames):\n if newaxis != oldaxes[i] or self.prop['forcesai']:\n setstage = False\n if self.pidevice.HasqCST():\n if self.pidevice.qCST()[oldaxes[i]] == 'NOSTAGE':\n try:\n debug('try rename NOSTAGE to TEMP (0x3C)')\n self.pidevice.SPA(oldaxes[i], 0x3c, 'TEMP')\n setstage = True\n except GCSError:\n pass\n self.pidevice.SAI(oldaxes[i], newaxis)\n if setstage:\n self.pidevice.SPA(newaxis, 0x3c, 'NOSTAGE')\n debug('restore NOSTAGE (0x3C)')",
"def updateGlobal(self):\n state = self.getState()\n n = len(self.myPlotCanvasList)\n for i in range(n):\n if self.myPlotCanvasList[i] is not None:\n self.myPlotCanvasList[i].myUpdateGlobal(state)",
"def redraw(event):\n if np.size(plt.get_figlabels()):\n #Need to check if figure is closed or not and only then do the following\n #operations. Else, the following operations will create a new figure\n ax.clear()\n drawRectangle(ax)\n fig.canvas.draw()\n else:\n pass",
"def get_ax_by_name(self, name):\n try:\n return self.ax[self.ax_names.index(name)]\n except ValueError:\n print(f'No axis with name: {name}. Returning None.')\n return None"
]
| [
"0.5747913",
"0.5492766",
"0.5326451",
"0.53198856",
"0.53106946",
"0.52366793",
"0.52360547",
"0.50930434",
"0.5065622",
"0.5005657",
"0.49565744",
"0.49383938",
"0.49214408",
"0.48142612",
"0.47483495",
"0.47238323",
"0.4721073",
"0.471163",
"0.4707206",
"0.4698941",
"0.46983975",
"0.4698043",
"0.46786875",
"0.46747947",
"0.4674658",
"0.46725538",
"0.46653935",
"0.46629322",
"0.46626762",
"0.46555865"
]
| 0.5793726 | 0 |
Check if 'axis' has already been referenced with 'refmode'. | def _isreferenced(self, refmode, axis):
if self.prop['forceref']:
return False
if refmode in ('POS',):
return False
if refmode == 'ATZ':
return self.pidevice.qATZ(axis)[axis]
if refmode == 'REF':
return self.pidevice.qREF(axis)[axis]
return self.pidevice.qFRF(axis)[axis] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_ref_known(self):\r\n \r\n if \"N\" in self.ref:\r\n return False\r\n else:\r\n return True",
"def checkRefs(self, export_refs):\r\n return True",
"def _ref_with_refcmd(self, axes, refmode):\n debug('ControllerStartup._ref_with_refcmd(axes=%s, refmode=%s)', axes, refmode)\n for axis in axes:\n if self.pidevice.HasRON():\n try:\n self.pidevice.RON(axis, True)\n except GCSError as exc:\n if exc == gcserror.E34_PI_CNTR_CMD_NOT_ALLOWED_FOR_STAGE:\n pass # hexapod axis\n else:\n raise\n try:\n getattr(self.pidevice, refmode)(axis)\n except GCSError as exc:\n if exc == gcserror.E5_PI_CNTR_MOVE_WITHOUT_REF_OR_NO_SERVO:\n self._databuf['servobuf'][axis] = getservo(self.pidevice, axis)[axis]\n self.pidevice.SVO(axis, not self._databuf['servobuf'][axis])\n getattr(self.pidevice, refmode)(axis)\n else:\n raise\n if self.pidevice.devname in ('C-843',):\n waitonreferencing(self.pidevice, axes=axis, **self._kwargs)\n waitonready(self.pidevice)",
"def isReference(node):\n return bool(isinstance(node, nodes.Referential)\n and node.get(DuAttrRefid, None))",
"def IsReference(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsReference(*args)",
"def XCAFDoc_ShapeTool_IsReference(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsReference(*args)",
"def isReferenceField(cls, _field):\n return isinstance(_field, fields.ReferenceField)",
"def is_reference(self):\r\n return conf.lib.clang_isReference(self)",
"def testIsRef(self):\n self.assertFalse(\n self.cd.is_ref\n )\n\n self.cd.cc = cdl_convert.ColorCorrectionRef('001')\n\n self.assertTrue(\n self.cd.is_ref\n )",
"def is_reference(self):\n return self.resource.is_reference()",
"def isRef(self, elem, attr):\n if elem is None: elem__o = None\n else: elem__o = elem._o\n if attr is None: attr__o = None\n else: attr__o = attr._o\n ret = libxml2mod.xmlIsRef(self._o, elem__o, attr__o)\n return ret",
"def is_reference(self, tokenized_record):\n\n return bool(set(tokenized_record).intersection(self.ref_markers))",
"def _referencedChecker(self, entity, params):\n\n if 'ref_logic' not in params:\n return False\n\n logic = self.helper.getLogicForItem(params, 'ref_logic')\n filter = {\n params['ref_field']: entity.key()\n }\n ref_entity = logic.getForFields(filter=filter, unique=True)\n\n result = ref_entity is not None\n\n no_ref = params.get('no_ref')\n if no_ref:\n result = not result\n\n return result",
"def XCAFDoc_ShapeTool_IsExternRef(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsExternRef(*args)",
"def check_reference(ref):\n obj_ref_regex = re.compile(\"^(?P<wsid>\\d+)\\/(?P<objid>\\d+)(\\/(?P<ver>\\d+))?$\")\n ref_path = ref.strip().split(\";\")\n for step in ref_path:\n if not obj_ref_regex.match(step):\n return False\n return True",
"def _is_ref_prop(name):\n return name.endswith(\"_ref\") or name.endswith(\"_refs\")",
"def isRef(self, doc, attr):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n if attr is None: attr__o = None\n else: attr__o = attr._o\n ret = libxml2mod.xmlIsRef(doc__o, self._o, attr__o)\n return ret",
"def isSetReference(self):\n return _libsbml.Association_isSetReference(self)",
"def referencewait(self):\n debug('ControllerStartup.referencewait()')\n if not self.refmodes or self.prop['skipref']:\n return\n self._databuf['servobuf'] = getservo(self.pidevice, self.pidevice.axes)\n toreference = {} # {cmd: [axes]}\n for i, refmode in enumerate(self._refmodes[:self.pidevice.numaxes]):\n if not refmode:\n continue\n axis = self.pidevice.axes[i]\n refmode = refmode.upper()\n if refmode not in toreference:\n toreference[refmode] = []\n if self._isreferenced(refmode, axis):\n debug('axis %r is already referenced by %r', axis, refmode)\n else:\n toreference[refmode].append(self.pidevice.axes[i])\n waitonaxes = []\n for refmode, axes in toreference.items():\n if not axes:\n continue\n if refmode == 'POS':\n self._ref_with_pos(axes)\n elif refmode == 'ATZ':\n self._autozero(axes)\n else:\n self._ref_with_refcmd(axes, refmode)\n waitonaxes += axes\n waitonreferencing(self.pidevice, axes=waitonaxes, **self._kwargs)",
"def _isReferencedChecker(self, entity, params):\n\n params['no_ref'] = False\n return self._referencedChecker(entity, params)",
"def _changed_canvas(self):\n return self.canvas is not self.ax.figure.canvas",
"def is_reference(type):\n nake_type = remove_alias(type)\n return isinstance(nake_type, cpptypes.reference_t)",
"def IsExternRef(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsExternRef(*args)",
"def should_check_refcount(self):\n raise NotImplementedError()",
"def isSetReferenceId(self):\n return _libsbml.GeneralGlyph_isSetReferenceId(self)",
"def isSetReferenceId(self):\n return _libsbml.ReferenceGlyph_isSetReferenceId(self)",
"def exists_ref(self, commit_id):\n pass",
"def _must_add_reference_var(\n climate_vars_dict: dict[str, InFileDictionary],\n reference_period: Sequence[str] | None,\n) -> bool:\n t = list(climate_vars_dict.values())[0].get(\"thresholds\", None)\n return t is None and len(climate_vars_dict) == 1 and reference_period is not None",
"def is_hom_ref(self) -> bool:\n return self.is_hom() and (self.allele1 == 0 or self.allele2 == 0)",
"def is_valid_tid(self, tid):\n return True if tid in self._header.refs else False"
]
| [
"0.62752515",
"0.6220863",
"0.6005233",
"0.59288573",
"0.5925373",
"0.58923024",
"0.58557653",
"0.5796622",
"0.570388",
"0.5672749",
"0.5603658",
"0.5603465",
"0.55883646",
"0.55624634",
"0.55470496",
"0.55324847",
"0.5511487",
"0.5494428",
"0.5420732",
"0.53797483",
"0.5354106",
"0.5338382",
"0.531562",
"0.5306501",
"0.53029305",
"0.5295459",
"0.5263716",
"0.52182025",
"0.51854974",
"0.5159542"
]
| 0.7821823 | 0 |
Enable RON, change servo state if appropriate and reference 'axes' with the 'refmode' command. | def _ref_with_refcmd(self, axes, refmode):
debug('ControllerStartup._ref_with_refcmd(axes=%s, refmode=%s)', axes, refmode)
for axis in axes:
if self.pidevice.HasRON():
try:
self.pidevice.RON(axis, True)
except GCSError as exc:
if exc == gcserror.E34_PI_CNTR_CMD_NOT_ALLOWED_FOR_STAGE:
pass # hexapod axis
else:
raise
try:
getattr(self.pidevice, refmode)(axis)
except GCSError as exc:
if exc == gcserror.E5_PI_CNTR_MOVE_WITHOUT_REF_OR_NO_SERVO:
self._databuf['servobuf'][axis] = getservo(self.pidevice, axis)[axis]
self.pidevice.SVO(axis, not self._databuf['servobuf'][axis])
getattr(self.pidevice, refmode)(axis)
else:
raise
if self.pidevice.devname in ('C-843',):
waitonreferencing(self.pidevice, axes=axis, **self._kwargs)
waitonready(self.pidevice) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def servo_on(self):\n self.logger.info('Setting servo ON')\n self.electronics.move_servo(1)\n self.config['servo']['status'] = 1",
"def turn_on(self):\n # read out the current pose of the robot\n configuration = self.robot.get_all_servo_position()\n\n # interpolate to the default position\n interpolation_time = 3000 # ms\n interpolation_steps = interpolation_time // TIME_FRAME\n\n speed = np.zeros(18)\n for i in range(18):\n speed[i] = (SERVOS_BASE[i] - configuration[i]) / interpolation_steps\n\n # execute the motion\n for t in range(interpolation_steps):\n self.robot.set_all_servo_position(configuration + t * speed)",
"def set_simulation_robot(self, on_off):\r\n return self._arm.set_simulation_robot(on_off)",
"def enable_robot(self):\n self._franka_robot_enable_interface.enable()",
"def _ref_with_pos(self, axes):\n debug('ControllerStartup._ref_with_pos(axes=%s)', axes)\n assert self.pidevice.HasPOS(), 'controller does not support the POS command'\n self.pidevice.RON(axes, [False] * len(axes))\n self.pidevice.POS(axes, [0.0] * len(axes))\n waitonready(self.pidevice, **self._kwargs)\n self.pidevice.SVO(axes, [True] * len(axes)) # A following qONT will fail if servo is disabled.",
"def poweron(self) -> None:\n self.servo_reset()",
"def enable_motor(self, enabled):\r\n self.enabled = enabled\r\n\r\n # Set motors in neutral if disabling.\r\n if not self.enabled:\r\n self.set_neutral()",
"def set_control_commands(self, ref_state, ref_ind):\n if not self.at_dest:\n self.commands['speed'] = self.cruising_speed * (5. / self.traffic_level)\n else:\n self.commands['speed'] = 0.0\n dx = ref_state[0] - self.x\n dy = ref_state[1] - self.y\n dx_v = numpy.cos(self.yaw) * dx + numpy.sin(self.yaw) * dy\n\n # To overtake, move to the left a little bit and follow your original traj.\n stay_overtake = False\n if self.overtake:\n self.overtake_begin_ignore += 1\n else:\n self.overtake_begin_ignore = 0\n if self.overtake and len(self.radar_readings[0, :]) > 0:\n stay_overtake = numpy.min(self.radar_readings[0, :]) > 30\n rospy.logerr(self.overtake_begin_ignore)\n if self.overtake_begin_ignore < 3:\n stay_overtake = True\n if not stay_overtake:\n self.overtake = False\n self.overtake_begin_counter = 0\n self.commands['speed'] *= 0\n # rospy.logerr('chcek for stay overtaking: ' + str(stay_overtake))\n else:\n stay_overtake = True\n\n if self.overtake and stay_overtake:\n self.commands['speed'] *= 1.5\n dy_v = -numpy.sin(self.yaw) * dx + numpy.cos(self.yaw) * dy + 7.5\n else:\n dy_v = -numpy.sin(self.yaw) * dx + numpy.cos(self.yaw) * dy\n dyaw_v = ref_state[2] - self.yaw\n # Correct yaw difference. dyaw_v 0..pi\n while dyaw_v > numpy.pi:\n dyaw_v -= 2*numpy.pi\n while dyaw_v < -numpy.pi:\n dyaw_v += 2*numpy.pi\n # Calculate steering command from dy_v, dx_v and dyaw_v\n steering_command = dy_v + dyaw_v * 1.5 / (1 + dx_v)\n # Compare with max steering angle\n if steering_command > 0.5:\n steering_command = 0.5\n elif steering_command < -0.5:\n steering_command = -0.5\n self.commands['steering_angle'] = steering_command",
"def enable_motor():\n print('Enabling motor')\n start_motor = '{\"id\" : \"Motor1\", \"enabled\" : \"1\"}'\n SERIAL_PARENT.send(start_motor)\n OUTGOING.append(start_motor)",
"def setservo(pidevice, axes, states=None, toignore=None, **kwargs):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n if not pidevice.HasSVO():\n return False\n if not axes:\n return True\n axes, states = getitemsvaluestuple(axes, states)\n if pidevice.HasRNP():\n axestorelax = [axis for axis, state in list(getservo(pidevice, axes).items()) if not state]\n if axestorelax:\n pidevice.RNP(axestorelax, [0.0] * len(axestorelax))\n waitonready(pidevice, **kwargs)\n eaxaxes = [axes[i] for i in range(len(axes)) if states[i]]\n enableaxes(pidevice, axes=eaxaxes, **kwargs)\n success = True\n toignore = [] if toignore is None else toignore\n toignore = [toignore] if not isinstance(toignore, list) else toignore\n toignore += [gcserror.E5_PI_CNTR_MOVE_WITHOUT_REF_OR_NO_SERVO, gcserror.E23_PI_CNTR_ILLEGAL_AXIS]\n for i, axis in enumerate(axes):\n try:\n pidevice.SVO(axis, states[i])\n except GCSError as exc: # no GCSRaise() because we want to log a warning\n if exc in toignore:\n debug('could not set servo for axis %r to %s: %s', axis, states[i], exc)\n success = False\n else:\n raise\n waitonready(pidevice, **kwargs)\n return success",
"def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n servo.setAccel(0, 0)\n servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)",
"def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)",
"def configure_servo(self, board):\n self.servo = board.get_pin(f\"d:{self.pin}:p\")\n board.servo_config(\n pin = self.pin,\n min_pulse = 544,\n max_pulse = 2400,\n angle = 93\n )",
"def turn_on(self, **kwargs: Any) -> None:\n self._set_light(ON_STATE)",
"def turn_on(self, **kwargs):\n set_sonoff_state(self._host, \"on\")\n self._state = True",
"def set_light_on(self):\r\n self._light = \"ON\"",
"def clickDarkReference(self, event):\n if self.darkReference is None:\n self.darkReference = self.spectrometer.getSpectrum()\n self.darkBtn.color = '0.99'\n else:\n self.darkReference = None\n self.darkBtn.color = '0.85'\n plt.pause(0.3)\n self.axes.autoscale_view()",
"def turn_on(self):\n GPIO.output(self.gpio, True) # turn on light",
"def turn_on(self, **kwargs):\n self.set_graceful_lock(True)\n self.robot.start_cleaning()",
"def servo_off(self):\n self.logger.info('Setting servo OFF')\n self.electronics.move_servo(0)\n self.config['servo']['status'] = 0",
"def set_relay_mode(momentary_mode_on, momentary_follow_sense, momentary_on_off):\n self._momentary_mode_on_prop.new_value = momentary_mode_on\n self._momentary_follow_sense_prop.new_value = momentary_follow_sense\n self._momentary_on_off_trigger_prop.new_value = momentary_on_off",
"def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)",
"def turnOnRtsfromActioncenter():\n pass",
"def referencewait(self):\n debug('ControllerStartup.referencewait()')\n if not self.refmodes or self.prop['skipref']:\n return\n self._databuf['servobuf'] = getservo(self.pidevice, self.pidevice.axes)\n toreference = {} # {cmd: [axes]}\n for i, refmode in enumerate(self._refmodes[:self.pidevice.numaxes]):\n if not refmode:\n continue\n axis = self.pidevice.axes[i]\n refmode = refmode.upper()\n if refmode not in toreference:\n toreference[refmode] = []\n if self._isreferenced(refmode, axis):\n debug('axis %r is already referenced by %r', axis, refmode)\n else:\n toreference[refmode].append(self.pidevice.axes[i])\n waitonaxes = []\n for refmode, axes in toreference.items():\n if not axes:\n continue\n if refmode == 'POS':\n self._ref_with_pos(axes)\n elif refmode == 'ATZ':\n self._autozero(axes)\n else:\n self._ref_with_refcmd(axes, refmode)\n waitonaxes += axes\n waitonreferencing(self.pidevice, axes=waitonaxes, **self._kwargs)",
"def turnOn(self):\n self.off = False\n self.turnOnAnimation()",
"def set_light_on(self):\n self._light = \"ON\"",
"def teleopPeriodic(self):\n self.drive.arcadeDrive(1, 0)\n self.brushless.set(1)\n self.spark.set(self.joystick.getY())",
"def switch_on(self,name):\n self.circles[name].switch_on()\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=1 WHERE target=%s\"\"\", (name,))",
"def turn_on(self, **kwargs):\n _LOGGER.debug(\"Turning on Motion Detection \")\n self.data.set_camera_recording(self._camera_id, \"motion\")",
"def turn_on(\n self,\n speed: str = None,\n percentage: int = None,\n preset_mode: str = None,\n **kwargs,\n ) -> None:\n self.wink.set_state(True, speed)"
]
| [
"0.6464424",
"0.60598856",
"0.57750314",
"0.5707349",
"0.57046753",
"0.566332",
"0.5652935",
"0.5643225",
"0.5640739",
"0.560289",
"0.55447114",
"0.54740083",
"0.54580873",
"0.5457564",
"0.540518",
"0.53942823",
"0.5375577",
"0.53675616",
"0.5366244",
"0.53644043",
"0.5353949",
"0.53517795",
"0.5311608",
"0.53102267",
"0.53101337",
"0.5309479",
"0.53073",
"0.5302573",
"0.52960163",
"0.5280775"
]
| 0.6683219 | 0 |
Autozero 'axes' and move them to position "0.0". | def _autozero(self, axes):
debug('ControllerStartup._autozero(axes=%s)', axes)
self.pidevice.ATZ(axes, ['NaN'] * len(axes))
waitonautozero(self.pidevice, axes, **self._kwargs)
setservo(self.pidevice, axes, [True] * len(axes), **self._kwargs)
moveandwait(self.pidevice, axes, [0.0] * len(axes), **self._kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def moveToZero(self):\n\t\tself.grp.a.t.v = [0,0,0]\n\t\tself.grp.a.r.v = [0,0,0]",
"def adjust_axes(axes):\n # TODO: Uncomment & decide for each subplot!\n for ax in axes.itervalues():\n core.hide_axis(ax)\n\n for k in [\n \"placeholder\",\n \"placeholder1\",\n \"placeholder2\",\n \"spikes_stim\",\n \"spikes_stim1\",\n \"spikes_stim2\",\n \"spikes_post\",\n \"stimulation_schema\"\n ]:\n axes[k].set_frame_on(False)",
"def reset(self):\n self.xview_moveto(0)\n self.yview_moveto(0)\n self.zoomMap(1, 0, 0)",
"def reset(self):\n # Don't reset axis labels\n self.range = ((0, 2, 1),) * self.ndim\n self.current_step = (0,) * self.ndim\n self.order = tuple(range(self.ndim))",
"def noAxisLines(axes='XYZ'):\n dislin.frame(0)\n dislin.noline(axes)",
"def fix_auto(self):\n if self.share_x:\n self.rescale_axes(x=True, y=False)\n self.fix_axes_ticks(axis='x')\n if self.share_y:\n self.rescale_axes(x=False, y=True)\n self.fix_axes_ticks(axis='y')",
"def auto_adjust_axes(self, *args):\n\n xmin, xmax = self.axes.get_xlim()\n ymin, ymax = self.axes.get_ylim()\n self.adjust_axes(xmin, ymin, xmax, ymax)",
"def clear_axes_selection(self):\n self.x_axis = ''\n self.y_axis = ''\n self.non_numeric_x_axis = False\n self.count_desired = False\n self.header_choices('x')",
"def reset_axis_counters(self):\n\n self.column_counter = 0\n self.row_counter = 0",
"def setAllZero(self):\n self.robot.set_joint([0,0,0,0,0])\n self.robot.save_config()",
"def apply_transforms(self):\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)",
"def plot_clear():\n plt.cla()",
"def zero(self):\n\t\tself.angle = 0.0\n\t\tself.draw()\n\t\ttime.sleep(self.delay)",
"def reset(self):\n try:\n self.ax.cla()\n except Exception as e:\n print 'Exception BasePlot:', e\n raise e\n \n self._plotbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self._timestampbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self.ax.set_axis_bgcolor('black')\n self.ax.set_xticks([])\n self.ax.set_yticks([])",
"def reset(self):\n self.x_pos1 = 0\n self.x_pos2 = self.x_pos1 + self.width\n self.y_pos = self.offset_y\n self.velocity = self.origin_velocity",
"def clear(self):\n\n # Clear\n self.axes.cla()\n try:\n self.figure.clf()\n except KeyError:\n FlatCAMApp.App.log.warning(\"KeyError in MPL figure.clf()\")\n\n # Re-build\n self.figure.add_axes(self.axes)\n self.axes.set_aspect(1)\n self.axes.grid(True)\n\n # Re-draw\n self.canvas.draw_idle()",
"def toggle_zero_grid(self, x):\r\n self.konfig.zero.set_grid(x)\r\n self.zeroGraf.toggle_grid(x)",
"def reset(self):\n self._x = 0\n self._y = 0",
"def set_zero(self, loc=None):\n self.Y[loc] -= self.Y[loc]",
"def move_zero_order(kwargs=None):\n self.variables.table.set_joystick(False)\n self.variables.table.set_axis([True, True, True])\n xpos = (\n float(self.variables.devices_dict[\"Table_control\"][\"table_xmax\"])\n - float(self.variables.devices_dict[\"Table_control\"][\"table_xmin\"])\n ) / 2.0\n ypos = (\n float(self.variables.devices_dict[\"Table_control\"][\"table_ymax\"])\n - float(self.variables.devices_dict[\"Table_control\"][\"table_ymin\"])\n ) / 2.0\n zpos = (\n float(self.variables.devices_dict[\"Table_control\"][\"table_zmax\"])\n - float(self.variables.devices_dict[\"Table_control\"][\"table_zmin\"])\n ) / 2.0\n errorcode = self.variables.table.move_to(\n [xpos, ypos, zpos],\n False,\n self.variables.default_values_dict[\"settings\"][\"height_movement\"],\n )\n # if errorcode:\n # self.variables.message_to_main.put(errorcode)",
"def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)",
"def reset_position(self):\n self.goto(STARTING_POSITION)",
"def center(self):\n if self.pos != 0.0:\n self.pos = 0.0",
"def setup_mpl_visuals(self, axes=None) -> None:\n if axes is None:\n axes = self.subplot\n axes.patch.set_facecolor('white')\n axes.set_aspect('equal', 'box')\n axes.set_xlim(-10, 10, auto=True)\n axes.set_ylim(-10, 10, auto=True)\n # TODO: Make XYLim confort to window size/dimensions\n axes.set_xticks([])\n axes.set_yticks([])\n self.figure.subplots_adjust(bottom=0, top=1, left=0, right=1)\n axes.axis('off')",
"def emit_reset(self):\n for name in self.layout.axes:\n params = self.layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)\n self.write_event(ecodes.EV_ABS, name, int(sum(params[1:3]) / 2))\n\n for name in self.layout.buttons:\n self.write_event(ecodes.EV_KEY, name, False)\n\n for name in self.layout.hats:\n self.write_event(ecodes.EV_ABS, name, 0)\n\n self.device.syn()",
"def reset(self):\n self.vrp = np.matrix([0.5, 0.5, 1])\n self.vpn = np.matrix([0, 0, -1])\n self.vup = np.matrix([0, 1, 0])\n self.u = np.matrix([-1, 0, 0])\n self.extent = [1., 1., 1.]\n self.screen = [400., 400.]\n self.offset = [20., 20.]",
"def reset_position(self, x, y):\n\t\tself.grid[x][y] = self.terminal",
"def reset_position(self):\n self.translate_to_point_O()\n\n # inverse rotation:\n rotation_matrix = np.stack(\n (self.pcs.i_hat, self.pcs.j_hat, self.pcs.k_hat), axis=0\n )\n\n self.rotate(rotation_matrix)",
"def clear_graphs(self):\n for ax in (self.master_plot, self.time_velocity, self.time_power, self.power_velocity):\n ax.cla()",
"def clean_axes(axl):\n cleanAxes(axl)"
]
| [
"0.6837609",
"0.64958775",
"0.61919284",
"0.59692055",
"0.59586036",
"0.59518695",
"0.5851395",
"0.58473384",
"0.5823534",
"0.58203787",
"0.5774866",
"0.5774172",
"0.57657653",
"0.5747361",
"0.5743723",
"0.5738721",
"0.57278085",
"0.5712758",
"0.57062715",
"0.56823045",
"0.56535506",
"0.5629119",
"0.5618699",
"0.5588711",
"0.55755144",
"0.55731887",
"0.55687606",
"0.55643517",
"0.5543714",
"0.55431217"
]
| 0.7430454 | 0 |
Set RON accordingly and reference 'axes' with the POS command to position "0.0". | def _ref_with_pos(self, axes):
debug('ControllerStartup._ref_with_pos(axes=%s)', axes)
assert self.pidevice.HasPOS(), 'controller does not support the POS command'
self.pidevice.RON(axes, [False] * len(axes))
self.pidevice.POS(axes, [0.0] * len(axes))
waitonready(self.pidevice, **self._kwargs)
self.pidevice.SVO(axes, [True] * len(axes)) # A following qONT will fail if servo is disabled. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_axes(self):\n self += helper.line(stroke=\"black\", x1=self.__dict__['x'], x2=self.__dict__['x'], y1=0, y2=self.__dict__['y']*2)\n self += helper.line(stroke=\"black\", x1=0, x2=self.__dict__['x']*2, y1=self.__dict__['y'], y2=self.__dict__['y'])",
"def set_relpos(self, relpos):\n self.relpos = relpos",
"def setPose(self, newPosition):\n self.origin1 = newPosition\n self.axis1 = self.G_gl[0:3, 0:3] @ self.axis0",
"def set_axes(self, a):\r\n self.axes = a",
"def setBarPosition(pos='none'):\n bdict = {'none':'NONE','ticks':'TICKS','axis':'AXIS','both':'BOTH'}\n dislin.barpos(bdict[pos])",
"def draw_axes(self, cr):\n # en gris\n cr.set_line_width(0.02)\n cr.set_source_rgb(0.3, 0.3, 0.3)\n cr.move_to( -1,0 )\n cr.line_to( 1,0 )\n cr.move_to( 0, -1 )\n cr.line_to( 0, 1 )\n cr.stroke()\n #self.draw_value( cr, \"0\", 0, 0 )\n #self.draw_value( cr, \"1\", 5-0.3, 0 )\n #self.draw_value( cr, \"2\", 2+0.3, 4-0.5 )",
"def set_position(self, position):\n self.gripper_io.set_signal_value(\"position_m\", position)",
"def set_base_xpos(self, pos):\n node = self.worldbody.find(\"./body[@name='base']\")\n node.set(\"pos\", array_to_string(pos - self.bottom_offset))",
"def set_base_xpos(self, pos):\n node = self.worldbody.find(\"./body[@name='base']\")\n node.set(\"pos\", array_to_string(pos - self.bottom_offset))",
"def setPos(self, pos):\n self.cameraNode.setPos(pos)",
"def setTickPosition(position='sameaslabels', axes='XYZ'):\n tickdict = {'sameaslabels':'LABELS', 'inside':'REVERS','center':'CENTER'}\n dislin.ticpos(tickdict[position],axes)",
"def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z",
"def set_pos(self, x, y, orien):\n self.pos_x = x\n self.pos_y = y\n self.orientation = orien",
"def enableaxes(self):\n debug('ControllerStartup.enableaxes()')\n if not self.pidevice.HasEAX() or self.prop['skipeax']:\n return\n for axis in self.pidevice.axes:\n try:\n self.pidevice.EAX(axis, True)\n except GCSError as exc:\n if exc != gcserror.E2_PI_CNTR_UNKNOWN_COMMAND:\n raise\n waitonready(self.pidevice, **self._kwargs)",
"def setPos(self,pos):\n self.Xpos,self.Ypos=pos",
"def _ref_with_refcmd(self, axes, refmode):\n debug('ControllerStartup._ref_with_refcmd(axes=%s, refmode=%s)', axes, refmode)\n for axis in axes:\n if self.pidevice.HasRON():\n try:\n self.pidevice.RON(axis, True)\n except GCSError as exc:\n if exc == gcserror.E34_PI_CNTR_CMD_NOT_ALLOWED_FOR_STAGE:\n pass # hexapod axis\n else:\n raise\n try:\n getattr(self.pidevice, refmode)(axis)\n except GCSError as exc:\n if exc == gcserror.E5_PI_CNTR_MOVE_WITHOUT_REF_OR_NO_SERVO:\n self._databuf['servobuf'][axis] = getservo(self.pidevice, axis)[axis]\n self.pidevice.SVO(axis, not self._databuf['servobuf'][axis])\n getattr(self.pidevice, refmode)(axis)\n else:\n raise\n if self.pidevice.devname in ('C-843',):\n waitonreferencing(self.pidevice, axes=axis, **self._kwargs)\n waitonready(self.pidevice)",
"def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()",
"def set_position(self, pos, debug=False):\n pos = max(pos, 0)\n pos = min(pos, 1)\n posrange = pos * self.range\n pos = posrange + self.min\n if debug:\n print('Setting Dynamixel {} with posrange {} to position {}'.format(self.id, posrange, pos))\n self.motor.set_position(int(pos))",
"def set_ini_positions(self):\n self.command_DAQ_signal.emit([\"set_ini_positions\"])",
"def setReciproque(self, reciproque):\n x0, x1 = reciproque(0), reciproque(1)\n self.slope = 1 / (x1 - x0)\n self.ordinate = - x0 * self.slope",
"def set_xpos(self, deg):\n if deg < 0:\n deg = 0\n if deg > 90:\n deg = 90\n deg = deg*2\n self.kit.servo[7].angle = deg",
"def set_position(self, pos):\n self.ref_pos = pos",
"def setOriginLines(val=\"xy\"):\n if val == \"x\":\n dislin.xaxgit()\n elif val == \"y\":\n dislin.yaxgit()\n elif val == \"cross\":\n dislin.cross()\n else:\n dislin.axgit()",
"def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )",
"def odom_update(self, data):\n self.curr_pos = (data.pose.pose.position.x, data.pose.pose.position.y)",
"def set_position(self, axis, x):\n\n if not self.enabled:\n return\n\n self.send_cmd(axis, ' POS={:.3f}'.format(x))\n return float(self.get_position(axis))",
"def update_pos(self):\n s = self\n s.rpos = s.rects[0].inf\n s.pos = s.physics.scl_coord_res(s.rpos)",
"def format_axes():\n\n plt.axes(frameon=False)\n plt.axvline(0, PlotParameter.y_axis_bot_lim, PlotParameter.y_axis_top_lim, color='k')\n plt.tick_params(which='both', bottom='off', top='off', right='off', labelbottom='off')\n plt.xlim(0, PlotParameter.x_axis_right_lim)\n plt.ylim(PlotParameter.y_axis_bot_lim, PlotParameter.y_axis_top_lim)\n plt.ylabel(PlotParameter.y_axis_label)",
"def setColorBarPositionHoriz(pos):\n dislin.vkxbar(pos)",
"def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)"
]
| [
"0.5808343",
"0.57251",
"0.56590384",
"0.5630359",
"0.5607852",
"0.56060356",
"0.5588824",
"0.5571485",
"0.5571485",
"0.5540956",
"0.5528975",
"0.5477126",
"0.54330236",
"0.5341452",
"0.53389716",
"0.5327909",
"0.5302402",
"0.5279917",
"0.52782357",
"0.5261827",
"0.52607894",
"0.5257464",
"0.5250002",
"0.5227752",
"0.522176",
"0.5213098",
"0.5206452",
"0.5200127",
"0.51982397",
"0.5196478"
]
| 0.69301665 | 0 |
Enable all connected axes if appropriate. | def enableaxes(self):
debug('ControllerStartup.enableaxes()')
if not self.pidevice.HasEAX() or self.prop['skipeax']:
return
for axis in self.pidevice.axes:
try:
self.pidevice.EAX(axis, True)
except GCSError as exc:
if exc != gcserror.E2_PI_CNTR_UNKNOWN_COMMAND:
raise
waitonready(self.pidevice, **self._kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setAxisAllColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'ALL', axes)",
"def set(self, **kwargs):\n for ax in self.axes.flat:\n ax.set(**kwargs)\n return self",
"def enable(self):\n self.enabled = True\n for child in self.children:\n child.enable()",
"def clear_graphs(self):\n for ax in (self.master_plot, self.time_velocity, self.time_power, self.power_velocity):\n ax.cla()",
"def show_axes(self):\n if hasattr(self, 'axes_widget'):\n self.axes_widget.EnabledOn()\n self.axes_widget.SetCurrentRenderer(self)\n else:\n self.add_axes()\n self.Modified()",
"def enable(self):\n\n self._slo_examples_per_batch.enable()\n self._slo_number_of_epochs.enable()\n self._slo_neural_network.enable()\n self._slo_image_size.enable()\n super().enable()",
"def add_axes(self, ax):\n self._canvas.cd()\n self._axes = ax\n self._canvas.Modified()",
"def enable(self):\n for val in data:\n val.enable()\n self.enabled = True",
"def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)",
"def enable_all(self, enable):\n logging.debug(\"Called enable_all with value {}\".format(enable))\n\n if enable:\n # Loop over all quads and channels in system, adding enable command to deferred\n # executor queue\n for quad_idx in range(len(self.quad)):\n for channel in range(Quad.NUM_CHANNELS):\n self.deferred_executor.enqueue(\n self.quad_enable_channel, self.quad_enable_interval, quad_idx, channel\n )\n self.__all_enabled = True\n else:\n # Clear any pending turn-on command from the queue first, then turn off all channels\n # immediately.\n num_enables_pending = self.deferred_executor.pending()\n if num_enables_pending > 0:\n logging.debug(\"Clearing {} pending quad enable commands from queue\".format(\n num_enables_pending\n ))\n self.deferred_executor.clear()\n for quad_idx in range(len(self.quad)):\n for channel in range(Quad.NUM_CHANNELS):\n self.quad[quad_idx].set_enable(channel, False)\n self.__all_enabled = False",
"def adjust_axes(axes):\n # TODO: Uncomment & decide for each subplot!\n for ax in axes.itervalues():\n core.hide_axis(ax)\n\n for k in [\n \"placeholder\",\n \"placeholder1\",\n \"placeholder2\",\n \"spikes_stim\",\n \"spikes_stim1\",\n \"spikes_stim2\",\n \"spikes_post\",\n \"stimulation_schema\"\n ]:\n axes[k].set_frame_on(False)",
"def enable_plot(self):\n n_t = 0\n n_t_t = 0\n if self.tree_ctrl is not None:\n n_t = self.tree_ctrl.GetCount()\n if self.tree_ctrl_theory is not None:\n n_t_t = self.tree_ctrl_theory.GetCount()\n if n_t + n_t_t <= 0:\n self.bt_plot.Disable()\n else:\n self.bt_plot.Enable()\n self.enable_append()",
"def set_axes(self, a):\r\n self.axes = a",
"def _visibleChannels_changed(self):\n for i in range(0,8):\n if i in self.visibleChannels:\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=True\n else:\n print i\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=False",
"def toggle_draw_axes(self):\n if self.draw_axes:\n self.draw_axes = False\n else:\n self.draw_axes = True\n self.redraw()",
"def toggle_draw_axes(self):\n if self.draw_axes:\n self.draw_axes = False\n else:\n self.draw_axes = True\n self.redraw()",
"def enable_all_devices(self, enable_ir_emitter=False):\n print(\"{} devices have been found\".format(len(self._available_devices)))\n\n for serial in self._available_devices:\n self.enable_device(serial)",
"def on_axes_update(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update axes\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.chart_list[i].setXRange(self.worker.start_range,\n self.worker.samples_count + NUM_GUI_SAMPLES, padding=0.075)\n\n # for i, series in enumerate(self.measurements_list):\n #\n # # An optimization to prevent unnecessary rendering\n # if i == tab_open:\n #\n # # Remove old x-axis\n # series.detachAxis(self.xaxis_list[i])\n # self.chart_list[i].chart().removeAxis(self.xaxis_list[i])\n # self.xaxis_list[i] = QValueAxis()\n #\n # # Add new x-axis\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.xaxis_list[i].setRange(self.worker.samples_count, self.worker.samples_count +\n # NUM_GUI_SAMPLES)\n # series.attachAxis(self.xaxis_list[i])",
"def enable_output_quantizers(self, enabled: bool) -> None:\n for quantizer in self.output_quantizers:\n quantizer.enabled = enabled",
"def enable_all_links(links):\n for link in links:\n link.set_enabled(True)",
"def setup_mpl_visuals(self, axes=None) -> None:\n if axes is None:\n axes = self.subplot\n axes.patch.set_facecolor('white')\n axes.set_aspect('equal', 'box')\n axes.set_xlim(-10, 10, auto=True)\n axes.set_ylim(-10, 10, auto=True)\n # TODO: Make XYLim confort to window size/dimensions\n axes.set_xticks([])\n axes.set_yticks([])\n self.figure.subplots_adjust(bottom=0, top=1, left=0, right=1)\n axes.axis('off')",
"def updateGlobal(self):\n state = self.getState()\n n = len(self.myPlotCanvasList)\n for i in range(n):\n if self.myPlotCanvasList[i] is not None:\n self.myPlotCanvasList[i].myUpdateGlobal(state)",
"def axes_enabled(self):\n if hasattr(self, 'axes_widget'):\n return bool(self.axes_widget.GetEnabled())\n return False",
"def enable_act_quantizers(self, enabled: bool) -> None:\n self.enable_input_quantizers(enabled)\n self.enable_output_quantizers(enabled)",
"def deactivate_all(self):\n\t self.active_nodes = [False for i in range(self.nodes)]",
"def enable_input_quantizers(self, enabled: bool) -> None:\n for quantizer in self.input_quantizers:\n quantizer.enabled = enabled",
"def showAll(cls):\n OK = cls.ph.doForAll('show', noShow=True)\n if OK: cls.plt.show()\n cls.ph.doForAll('clear')\n # They should all have removed themselves now, but what the\n # heck, clear it anyways\n cls.ph.removeAll()",
"def enable(widget_list: list) -> None:\r\n\r\n for widget in widget_list:\r\n widget.configure(state='normal')",
"def plot_all(self):\n self.plot_ramps()\n self.plot_groupdq()",
"def set_grid_alpha(self, alpha):\n for i in range(self.num_labels):\n grid = self.grid[i]\n grid.alpha = alpha"
]
| [
"0.6418385",
"0.6268001",
"0.6101318",
"0.5986963",
"0.59757614",
"0.575294",
"0.5737837",
"0.56969994",
"0.56938237",
"0.5679895",
"0.56635314",
"0.5643768",
"0.5632977",
"0.5628028",
"0.5533893",
"0.5533893",
"0.5528595",
"0.54902184",
"0.53917605",
"0.5374233",
"0.5358677",
"0.5355838",
"0.53455555",
"0.53223777",
"0.52924323",
"0.5276098",
"0.5270405",
"0.5250704",
"0.5235671",
"0.5230441"
]
| 0.6553111 | 0 |
Write 'wavepoints' for 'wavetable' in bunches of 'bunchsize'. The 'bunchsize' is device specific. Please refer to the controller manual. | def writewavepoints(pidevice, wavetable, wavepoints, bunchsize=None):
if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):
raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)
wavepoints = wavepoints if isinstance(wavepoints, (list, set, tuple)) else [wavepoints]
if bunchsize is None:
bunchsize = len(wavepoints)
for startindex in range(0, len(wavepoints), bunchsize):
bunch = wavepoints[startindex:startindex + bunchsize]
pidevice.WAV_PNT(table=wavetable, firstpoint=startindex + 1, numpoints=len(bunch),
append='&' if startindex else 'X', wavepoint=bunch) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trackBunchTurns(self, bunch):\n\t\tturns = self.__turns\n\t\t#start\n\t\tfor i in range(turns-1):\t\t\t\n\t\t\tself.trackBunch(bunch)\t\n\t\t\tsyncPart = bunch.getSyncParticle()\n\t\t\ttime = syncPart.time()\n\t\t\tself.setTimeDepStrength(time)\n\t\t\tprint \"debug trackBunchTurns time\",time,\"in\",i,\"turn\"\n\t\t#getsublattice\n\t\t#sublattice.trackBunch(bunch)",
"def do_wave(l, wave_type, r, g, b, duration, repeat):\n command = create_wave_command(\n wave_type, r, g, b, duration, repeat\n )\n l.write(command)",
"def next_wave(self):\n if self._wave == self._level.get_max_wave():\n return\n\n self._wave += 1\n\n #Task 1.3 (Status Bar): Update the current wave display here\n self._status_bar.set_wave(self._wave)\n\n #Task 1.5 (Play Controls): Disable the add wave button here (if this is the last wave)\n if self._wave == 20:\n self._wave_button.config(state=tk.DISABLED)\n\n #Generate wave and enqueue\n wave = self._level.get_wave(self._wave, self._game)\n for step, enemy in wave:\n enemy.set_cell_size(self._game.grid.cell_size)\n\n self._game.queue_wave(wave)",
"def stubb_fitlers(wave_min=350., wave_max=1050):\n throughPath = os.path.join(getPackageDir('throughputs'), 'baseline')\n bps = {}\n lsstKeys = ['u', 'y']\n bps = {}\n for key in lsstKeys:\n bp = np.loadtxt(os.path.join(throughPath, 'total_'+key+'.dat'),\n dtype=zip(['wave', 'trans'], [float]*2))\n bpTemp = Bandpass()\n good = np.where((bp['trans'] > 0.) & (bp['wave'] > wave_min) & (bp['wave'] < wave_max))\n bpTemp.setBandpass(bp['wave'], bp['trans'], wavelen_min=bp['wave'][good].min(),\n wavelen_max=bp['wave'][good].max())\n bps[key+'_truncated'] = bpTemp\n return bps",
"def internal_wave_KE(U, V, z, bin_idx, wl_min, wl_max, bin_size):\n \n \n Uspeci = []\n Vspeci = []\n Uspec = []\n Vspec = []\n Upowi = []\n Vpowi = []\n Upower = []\n Vpower = []\n U = U**2\n V = V**2\n \n sp = np.nanmean(np.gradient(z, axis=0))\n \n U_mx, U_kx = specGrid(U[bin_idx[0,:],0], sp, bin_size)\n \n for Ui, Vi in zip(U.T, V.T):\n \n for binIn in bin_idx:\n Uspec1 = SpectrumGen(Ui[binIn], bin_size)\n Upowi.append(power_spec(Uspec1))\n Uspeci.append(Uspec1)\n Vspec1 = SpectrumGen(Vi[binIn], bin_size)\n Vpowi.append(power_spec(Vspec1))\n Vspeci.append(Vspec1)\n \n Uspeci = np.vstack(Uspeci)\n Vspeci = np.vstack(Vspeci)\n Upowi = np.vstack(Upowi)\n Vpowi = np.vstack(Vpowi)\n \n Uspec.append(Uspeci)\n Vspec.append(Vspeci)\n Upower.append(Upowi)\n Vpower.append(Vpowi)\n Uspeci = []\n Vspeci = []\n Upowi = []\n Vpowi = []\n \n # integrate Power Spec of U and V between chosen vertical wavelengths\n Uint = []\n Vint = []\n \n for Us, Vs in zip(Upower, Vpower):\n Ui = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Us])\n Vi = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Vs])\n Uint.append(Ui)\n Vint.append(Vi)\n \n Ui = []\n Vi = []\n \n \n Uint = np.hstack(Uint)\n Vint = np.hstack(Vint)\n \n Ek = 0.5*(Uint + Vint)\n \n return Ek, Upower, Vpower, U_kx, Uspec, Vspec",
"def configure_power_sweep(\n self, freq, start_power, stop_power, *, points=None, ifbw=None\n ):\n self.sweep.type = Sweep.POWER\n self.freq_cw = freq\n self.SOURce.POWer[1].STARt.w(\n start_power\n ) # The port number suffix on POWer is ignored by the instrument\n self.SOURce.POWer[1].STOP.w(stop_power)\n if points:\n self.sweep.points = points\n if ifbw:\n self.ifbw = ifbw",
"def dump_psth_peaks(ffname, outprefix, celltype, window=100e-3, binwidth=5e-3):\n with open('{}_psth_{}_{}ms_window_{}ms_bins.csv'.format(outprefix, celltype, window*1e3, binwidth*1e3), 'wb') as fd:\n writer = csv.writer(fd, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n dbcnt_flist = get_dbcnt_dict(ffname)\n bins = np.arange(-window / 2.0, window / 2.0 + 0.5 * binwidth, binwidth)\n writer.writerow(['dbcount', 'filename'] + list(np.asarray(np.round(bins[1:]*1e3), dtype=int)))\n for dbcnt, flist in dbcnt_flist.items():\n for fname in flist:\n data = TraubData(makepath(fname))\n pop_train_list = []\n bgtimes, probetimes = get_stim_times(data, correct_tcr=True)\n if (len(bgtimes) == 0) and (len(probetimes) == 0):\n print 'EE: {} has no TCR spiking on stimulus.'.format(fname)\n continue\n stim_times = np.concatenate((bgtimes, probetimes))\n stim_times.sort()\n # print '###', stim_times\n for cell, train in data.spikes.items():\n if cell.startswith(celltype):\n pop_train_list.append(train)\n pop_train = np.concatenate(pop_train_list)\n pop_train.sort()\n \n bgpsth, b = psth(pop_train, stim_times, window=window, bins=bins)\n bgpsth /= (data.cellcounts._asdict()[celltype] * binwidth)\n writer.writerow([dbcnt, fname] + list(bgpsth))",
"def set_waveform():\n waveform = request.params.get(\"waveform\", 0, type=int)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_GenWaveform(output, ctypes.c_int(waveform))\n if retval != 0:\n LOG.error(\"Failed to set waveform of the signal generator. Error code: %s\",\n ERROR_CODES[retval])",
"def upload_beam(\n pointing_list,\n obsid,\n begin,\n end,\n vcstools_version=None,\n mwa_search_version=None,\n mwa_search_command=None,\n supercomputer_id=None\n ):\n # Get versions\n if vcstools_version is None:\n import vcstools.version\n vcstools_version = vcstools.version.__version__\n if mwa_search_version is None:\n import mwa_search.version\n mwa_search_version = mwa_search.version.__version__\n if supercomputer_id is None:\n supercomputer_id = find_supercomputer_id()\n\n # Set up data to upload\n data_list = []\n for point in pointing_list:\n raj, decj = point.split(\"_\")\n rad, decd = sex2deg(raj, decj)\n data = {'ra_degrees': rad,\n 'dec_degrees': decd,\n 'begin_gps_seconds': begin,\n 'end_gps_seconds': end,\n 'supercomputer_id': supercomputer_id,\n 'vcstools_version': vcstools_version,\n 'mwa_search_version': mwa_search_version,\n 'mwa_search_command': mwa_search_command,\n 'observation_id': obsid}\n data_list.append(data)\n print(\"Staging beam (RA {}, Dec {})\".format(data['ra_degrees'], data['dec_degrees']))\n\n upload_wrapper(data_list, 'beams')",
"def set_wavelength(self, wavelength: float) -> None:\n\n assert isinstance(wavelength, float), \"Incompatible type\"\n\n #:SENSe[n][:CHANnel[m]]:POWer:WAVelength /?\n self._inst.write(\"SENS:POW:WAV {}\".format(wavelength))",
"def add_new_flash_to_list(self, iteration, powerups, resized_w, resized_h):\n if iteration % 500 == 0:\n new_speed = self.add_new_figure(resized_w, resized_h)\n powerups.append(new_speed)\n\n return powerups",
"def writetone(self, call_vector, duration):\n if duration == 0:\n return\n samples = int(self.sample_rate * duration)\n values = []\n fvector = (self.fm_freq, self.hfm_freq, self.sv_freq)\n for i in range(0, samples):\n try:\n if type(fvector[1]) == tuple:\n tone = self.__get_waveval2(i, call_vector, fvector)\n else:\n tone = self.__get_waveval(i, call_vector, fvector)\n except ValueError:\n print \"ERROR: Sum of calls cannot exceed max calls\"\n print \"Cleaning up...\"\n print \"No files written.\"\n os.remove(self.output)\n exit(1)\n signal = wave.struct.pack('h', tone) # convert to binary\n values.append(signal)\n # Buffer values every 5 seconds (22050 samples)\n if len(values) >= 220500:\n value_string = \"\".join(values)\n self.file.writeframes(value_string)\n # Clear values array\n del values[0:len(values)]\n value_string = \"\".join(values)\n self.file.writeframes(value_string)",
"def set_wave(self, wave, waves_cnt):\n\n wave_str = \"WAVE {0}/{1}\".format(wave, waves_cnt)\n self.wave_label.element.text = wave_str",
"def set_UL_bw(self,oid, wavelength, pkt_size, timestamp):\n try: \n aux = self.wavelengths[wavelength]\n except:\n print \"CREATED WAVELENGTH %d\" %wavelength\n self.create_wavelength(wavelength)\n\n sec_before = self.wavelengths[wavelength]['sec_UL_timestamp']\n if self.env.now > (sec_before + 1):\n #Then a second has passed. Time to flush counter\n print \"MONITOR - passou 1 segundo - %f > %f\" % (self.env.now, sec_before)\n print \"WAVELENGTH %d - SECOND BPS == %f\" % (wavelength,self.wavelengths[wavelength]['UL_bps'])\n self.wavelengths[wavelength]['last_UL_bps'] = self.wavelengths[wavelength]['UL_bps']\n self.wavelengths[wavelength]['UL_bps'] = 0\n self.wavelengths[wavelength]['sec_UL_timestamp'] = self.env.now\n \n self.wavelengths[wavelength]['UL_bps']+=pkt_size\n self.wavelengths[wavelength]['last_UL_timestamp'] = self.env.now\n\n self.bandwidth_UL_file.write(\"{},{},{},{}\\n\".format(wavelength,oid,pkt_size,timestamp))",
"def put_waveform(self, value):\n self.put_value(value)",
"def CC_wdw(self):\n # Setup param\n loc = 'TSdata'\n if 'single' == self.newParam['survey_type']:\n TS_len = dt.utilities.DB_pd_data_load(self.Database, loc).shape[0]\n elif 'multiple' == self.newParam['survey_type']:\n TS_group = dt.utilities.DB_group_names(self.Database, group_name = loc)[0]\n TS_len = dt.utilities.DB_pd_data_load(self.Database, loc+'/'+TS_group).shape[0]\n\n param = self.newParam\n\n # Assign TS processing length to end_wdws if given\n if param['end_wdws']:\n TS_sig_len = param['end_wdws']\n else:\n TS_sig_len = TS_len\n\n ERROR_MESSAGE = 'The length of a TS signal to be processed is', TS_sig_len, \\\n 'which is < end of the last window'\n\n # Calculate wdwPos for overlapping windows of ww_ol if wdwPos is False\n if param['wdwPos'][0] is False:\n # Error checks\n if TS_sig_len < self.newParam['ww'][0]:\n raise Warning(ERROR_MESSAGE)\n\n wdwStep = np.floor(param['ww'][0] *\n (100 - param['ww_ol']) / 100)\n\n if self.verbose: print('* Length fo TSdata', TS_len)\n\n max_wdwPos = TS_sig_len - param['ww'][0] + 1\n wdwStarts = np.arange(0 + param['sta_wdws'], max_wdwPos, wdwStep).astype(int)\n\n if self.verbose: print('* The step in window potions is %s sample points' % wdwStep)\n if self.verbose: print('* The max window postions is %s sample points'% max_wdwPos)\n\n param['wdwPos'] = [ [wdw_start, wdw_start + param['ww'][0]] for\n wdw_start in wdwStarts ]\n\n # Only update wdwPos structure if not already done so\n elif np.array(param['wdwPos'][0]).shape == ():\n param['wdwPos'] = [ [wdw_start, wdw_start + ww] for wdw_start,ww in\n zip(param['wdwPos'], param['ww'])]\n\n self.newParam['wdwPos'] = param['wdwPos']",
"def skimmer(bigCut, skimLoc, wsOut):\n\tprint \"Cutting on: \\n\",bigCut\n\n\tskim = TChain(\"skimTree\")\n\tskim.Add(skimLoc)\n\tlib.SetTreeInputs(skim, lib.skimDict)\n\n\tskim.SetEntryList(0)\n\tskim.Draw(\">>elist\", bigCut, \"entrylist\")\n\telist = gDirectory.Get(\"elist\")\n\tprint \"Found \",elist.GetN(),\" skim file entries.\"\n\tskim.SetEntryList(elist)\n\n\tds = GATDataSet()\n\twb = GATWaveformBrowser()\n\tds = wb.LoadSkimWaveforms(skim, bigCut)\n\tprint \"Found \",wb.GetNWaveforms(),\" waveforms.\"\n\n\tgat = ds.GetGatifiedChain()\n\tlib.SetTreeInputs(gat, lib.gatDict)\n\n\tbuilt = ds.GetBuiltChain()\n\tlib.SetTreeInputs(built, lib.builtDict)\n\n\tout = TFile(wsOut,\"RECREATE\")\n\n\twaveTree = TTree(\"waveTree\",\"wave-skim single waveforms\")\n\tenf = np.zeros(1,dtype=float)\n\tt50 = np.zeros(1,dtype=float)\n\trunTime = np.zeros(1,dtype=float)\n\tgatEnt, gatHit, skimEnt, builtEnt, eventTreeEntry, chn = 0., 0., 0., 0., 0., 0.\n\twf = MGTWaveform()\n\twaveTree.Branch(\"gatEnt\",long(gatEnt),\"gatEnt/L\")\n\twaveTree.Branch(\"gatHit\",long(gatHit),\"gatHit/L\")\n\twaveTree.Branch(\"builtEnt\",long(builtEnt),\"builtEnt/L\")\n\twaveTree.Branch(\"skimEnt\",long(skimEnt),\"skimEnt/L\")\n\twaveTree.Branch(\"eventTreeEntry\",long(eventTreeEntry),\"eventTreeEntry/L\")\n\twaveTree.Branch(\"waveform\",wf)\n\twaveTree.Branch(\"channel\",int(chn),\"channel/I\")\n\twaveTree.Branch(\"trapENFCal\",enf,\"trapENFCal/D\")\n\twaveTree.Branch(\"blrwfFMR50\",t50,\"blrwfFMR50/D\")\n\twaveTree.Branch(\"runTime\",runTime,\"runTime/D\")\n\n\t# save the entry numbers for the full event tree\n\tEntryList = []\n\n\t# fill waveTree\n\tlastEvent = 0\n\teventTreeEntry = -1\n\teventMismatchCount = 0\n\twfMismatchCount = 0\n\tfor waveNum in xrange(wb.GetNWaveforms()):\n\n\t\tgatEnt = wb.GetEntryNumber(waveNum)\n\t\tgatHit = wb.GetIterationNumber(waveNum)\n\t\tgat.GetEntry(gatEnt)\n\t\tbuilt.GetEntry(gatEnt)\n\t\tbuiltEnt = built.GetEntryNumber(gatEnt)\n\t\tskimEnt = 0\n\t\tfor ientry in xrange(elist.GetN()):\n\t\t\tentryNumber = skim.GetEntryNumber(ientry)\n\t\t\tskim.LoadTree( entryNumber )\n\t\t\tskim.GetEntry( entryNumber )\n\t\t\t# gat.LoadTree returns the entry number of the original tree\n\t\t\tif skim.iEvent==gat.LoadTree(gatEnt):\n\t\t\t\tskimEnt = entryNumber\n\t\t\t\tbreak\n\t\tskim.GetEntry(skimEnt)\n\n\t\tif abs(lib.timestamp.at(0)/1E8 - lib.s_tloc_s[0]) > 0.001:\n\t\t\tprint \"waveform\",waveNum,\": mismatched events!\"\n\t\t\teventMismatchCount += 1\n\t\t\tprint \"skim - run %d enf.at(0) %.3f enf.size %d time %.2f\" % (lib.s_run[0], lib.s_trapENFCal.at(0), lib.s_trapENFCal.size(), lib.s_tloc_s[0])\n\t\t\tprint \"gat - run %d enf.at(0) %.3f enf.size %d time %.2f\\n\" % (lib.run[0], lib.trapENFCal.at(0), lib.trapENFCal.size(), lib.timestamp.at(0)/1E8)\n\t\t\tcontinue\n\n\t\t# output some physics\n\t\twf = wb.GetWaveform(waveNum)\n\n\t\tnullchk = str(wf)\n\t\tif \"nil\" in nullchk:\n\t\t\tprint \"waveform\",waveNum,\",iteration \",gatHit,\": unexpected number of waveforms ...\"\n\t\t\twfMismatchCount +=1\n\t\t\tprint \"skim - run %d enf.at(0) %.3f enf.size %d time %.2f\" % (lib.s_run[0], lib.s_trapENFCal.at(0), lib.s_trapENFCal.size(), lib.s_tloc_s[0])\n\t\t\tprint \"gat - run %d enf.at(0) %.3f enf.size %d time %.2f\\n\" % (lib.run[0], lib.trapENFCal.at(0), lib.trapENFCal.size(), lib.timestamp.at(0)/1E8)\n\t\t\tcontinue\n\n\t\tchn = lib.channel.at(gatHit)\n\t\tenf[0] = lib.trapENFCal.at(gatHit)\n\t\tt50[0] = lib.blrwfFMR50.at(gatHit)\n\t\trunTime[0] = lib.timestamp.at(gatHit)/1E8\n\n\t\t# so you can 
match waveTree to eventTree\n\t\tif lastEvent != gatEnt:\n\t\t\teventTreeEntry += 1\n\t\t\tEntryList.append([gatEnt,skimEnt])\n\n\t\twaveTree.Fill()\n\n\t\tlastEvent = gatEnt\n\n\tprint \"\\nDone. For %d waveforms:\" % wb.GetNWaveforms()\n\tprint \"\\t%d had mismatched gat/skim events based on timestamp differences (and were skipped)\" % eventMismatchCount\n\tprint \"\\t%d had fewer wf's than expected in the built data (and were skipped).\" % wfMismatchCount\n\n\twaveTree.Write()\n\n\t# now fill the full event tree by looping over EntryList\n\tprint \"\\n filling full event tree ...\\n\"\n\teventTree = TTree(\"eventTree\",\"wave-skim full output\")\n\toutDict = lib.CreateOutputDict('all')\n\tlib.SetTreeOutputs(eventTree, outDict)\n\n\tfor i in EntryList:\n\t\tgatEntry = i[0]\n\t\tskimEntry = i[1]\n\n\t\tgat.GetEntry(gatEntry)\n\t\tbuilt.GetEntry(gatEntry)\n\t\tskim.GetEntry(skimEntry)\n\n\t\t# verify we get the same output as before\n\t\t# print \"skim - run %d enf-0 %.3f size %d time %.2f\" % (lib.s_run[0],lib.s_trapENFCal.at(0),lib.s_trapENFCal.size(),lib.s_tloc_s[0])\n\t\t# print \"gat - run %d enf-0 %.3f size %d time %.2f\\n\" % (lib.run[0],lib.trapENFCal.at(0),lib.trapENFCal.size(),lib.timestamp.at(0)/1E8)\n\n\t\teventTree.Fill()\n\n\teventTree.Write()\n\tout.Close()",
"def analyze_wfs(self, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001, compact=True):\n\n print(\"---------------------------------\")\n print(\"Analyzing waveforms to get maxima\")\n print(\"---------------------------------\")\n\n # Creo una progress bar per rendere piu' fruibile visivamente il programma\n bar = progressbar.ProgressBar(maxval=self.number_of_events,\n widgets=[progressbar.Bar(\"=\", \"[\", \"]\"), \" \", progressbar.Percentage()])\n bar.start()\n counter = 0\n # Ora faccio un loop sugli eventi..\n if compact:\n for event in range(0, len(self.table_sipm_time['ev']), 9):\n # ..e chiamo la funzione analyze_ev_wf per ogni evento\n peaks_dataframe = self.analyze_ev_wf_compact(\n event, n_bsl, pic_name, peak_height, peak_prominences)\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat(\n [self.wf_peaks, peaks_dataframe], ignore_index=True)\n bar.update(counter+1)\n counter += 9\n else:\n for event in self.table_sipm_time['ev']:\n # ..e chiamo la funzione analyze_ev_wf per ogni evento\n peaks_time, peaks_ampl = self.analyze_ev_wf(\n event, n_bsl, pic_name, peak_height, peak_prominences)\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat([self.wf_peaks, pd.DataFrame(\n {'t': peaks_time, 'A': peaks_ampl})], ignore_index=True)\n bar.update(counter+1)\n counter += 1\n\n bar.finish()\n print(\"Events: \"+str(len(self.table_sipm_time['ev'])))\n print(\"---------------------------------\")\n print(\"Waveform analysis completed!\")\n # Devo ora ricavare di nuovo i Dt dai tempi assoluti, utilizzando la funzione diff()..\n self.wf_peaks['dt'] = self.wf_peaks['t'].diff()\n # ..e scartando il primo valore (che non ha un Dt)\n self.wf_peaks = self.wf_peaks.iloc[1:]\n print('Found {:d} peaks in waveforms\\n'.format(len(self.wf_peaks)))",
"def _default_wave(wavemin=None, wavemax=None, dw=0.2):\n from desimodel.io import load_throughput\n\n if wavemin is None:\n wavemin = load_throughput('b').wavemin - 10.0\n if wavemax is None:\n wavemax = load_throughput('z').wavemax + 10.0\n\n return np.arange(round(wavemin, 1), wavemax, dw)",
"def prep_wave(self):\n wave_str = \"Wave \" + str(self.stats.wave)\n self.wave_image = self.medium_font.render(wave_str, True, \n self.settings.text_colour, self.settings.bg_colour)\n\n # Position the wave below the score.\n self.wave_rect = self.wave_image.get_rect()\n self.wave_rect.right = self.score_rect.right\n self.wave_rect.top = self.score_rect.bottom + 10",
"def wavelenstep(self):\n return self._wavelenstep",
"def water_delay(block_size):\n\n\tdirectory = \"/local/scratch/sam5g13/Sam_5th-yr_Project/test_data\"\n\tfile_name = \"{}/tip4p2005_50_TOTEST.npy\".format(directory)\n\tgnuplot = r'/usr/bin/gnuplot'\n\n\n\tfile_data = np.load(file_name, mmap_mode='r')\n\n\t_, _, _, gamma, _ = file_data \n\n\tgamma_sample = blocksav(gamma, block_size)\n\n\tgamma_file = \"{}/tip4p2005_50_blocksize_{}_gamma.txt\".format(directory, block_size)\n\twith open(gamma_file, 'w') as outfile:\n\t\tnp.savetxt(outfile, gamma_sample)\n\n\tgamma_file_name = \"{}/tip4p2005_50_blocksize_{}_gamma.txt\".format(directory, block_size)\n\n\tcorrelations = subprocess.check_output([\"corr\", gamma_file_name])\n\t\n\tmutual_information = subprocess.check_output([\"mutual\", gamma_file_name])\n\n\tcorrelation_array = np.array(correlations.split()[5:], dtype=float)\n\tmutual_information_array = np.array(mutual_information.split()[2:], dtype=float)\n\n\tidx_odd = range(1,199,2)\n\tidx_even = range(0,200,2)\n\n\tidx_odd1 = range(1,43,2)\n\tidx_even1 = range(0,44,2)\n\n\t#correlation_values = correlation_array[idx_odd]\n\tmutual_information_values = mutual_information_array[idx_odd1]\n\tprint 'LOOK HERE...........................................', mutual_information_array[idx_odd1], len(mutual_information_array[idx_odd1])\n\n\t\"\"\"\n\tdelay_length = 0\n\n\tfor o in range(len(correlation_values) - 1):\n\t\tprint o, correlation_values[o], correlation_values[o+1]\n\t\tif correlation_values[o] > correlation_values[o+1]:\n\t\t\tdelay_length = o \n\t\telse: break\n\t\n\tdelay_length = delay_length + 1\n\n\tprint \"The delay length is\", delay_length\n\t\"\"\"\n\n\tmutual_info_length = 0\n\n\tfor o in range(len(mutual_information_values) - 1):\n\t\t#print o, correlation_values[o], correlation_values[o+1]\n\t\tif mutual_information_values[o] > mutual_information_values[o+1]:\n\t\t\tmutual_info_length = o \n\t\telse: break\n\t\n\tmutual_info_length = mutual_info_length + 1\n\t\n\tprint \"The mutual info length is\", mutual_info_length\n\n\t#assert \tdelay_length == mutual_info_length, \"The minimums of the mutual information and the correlations are not equal! %d %d\" % (delay_length, mutual_info_length)\n\t\n\tproduce_delays = subprocess.check_output([\"delay\", gamma_file_name, \"-d\" + str(mutual_info_length)])\n\n\t\n\tdelay_file = \"{}/tip4p2005_50_blocksize_{}_gamma_delay_{}.txt\".format(directory, block_size, mutual_info_length)\n\tf = open(delay_file, 'w')\n\tf.write(produce_delays)\n\tf.close()\n\n\t\"\"\"\n\n\tprint produce_delays\n\tprint len(produce_delays), len(mutual_information_values)\n\tplt.figure(\"produce_delays vs mutual information\")\n\tplt.xlabel(\"produce_delays\")\n\tplt.ylabel(\"Mutual information\")\n\tplt.plot(produce_delays, mutual_information_values)\n\tplt.show()\n\t\n\t\"\"\"\n\t\n\tembedding = subprocess.check_output([\"false_nearest\", gamma_file_name])\n\n\tembedding_dimension = int(raw_input(\"What embedding dimension would you like to use? 
\"))\n\t\n\trun_calc = subprocess.check_output(['gnuplot', '-e', \"filename='{}/tip4p2005_50_blocksize_{}_gamma_delay_{}.txt';ofilename='tip4p2005_50_blocksize_{}_gamma_delay_{}_graph.png'\".format(directory, block_size, mutual_info_length, block_size, mutual_info_length ),\"plot.gnu\"])\n\n\n\t\"\"\"Imports the time series and specifies each aspect used in building the recurrence matrix\"\"\"\n\n\tsettings = Settings(time_series = gamma_sample, embedding_dimension = embedding_dimension, time_delay = mutual_info_length, similarity_measure = EuclideanMetric, neighbourhood = FixedRadius(radius = 13), min_diagonal_line_length = 2, min_vertical_line_length = 2)\n\n\t\"\"\"Performs the computation and prints out all the results\"\"\"\n\n\trqacomputation = RQAComputation.create(settings, verbose = True)\n\n\trqaresult = rqacomputation.run()\n\n\tprint rqaresult\n\n\t\"\"\"Creates the Recurrence matrix for viewing\"\"\"\n\n\trpcomputation = RecurrencePlotComputation.create(settings)\n\n\trpresult = rpcomputation.run()\n\n\tImageGenerator.save_recurrence_plot(rpresult.recurrence_matrix, 'recurrence_plot.png')",
"def layout_waveguide(cell, layer, points_list, width, smooth=False):\n\n dbu = cell.layout().dbu\n\n dpolygon = waveguide_dpolygon(points_list, width, dbu, smooth=smooth)\n dpolygon.compress(True)\n dpolygon.layout(cell, layer)\n return dpolygon",
"def set_custom_wave(self, wave: np.ndarray, update_config: bool = True) -> None:\n self.wave = wave\n if update_config:\n self.start_wavelength = self.wave[0]\n self.end_wavelength = self.wave[-1]\n self.R_samp = np.nan\n self._custom_wave = True",
"def spawn_scout(self):\n\n #If wave 2\n if self.wave == 2:\n \n #Spawn 1 scout\n self._spawn_scout(self.screen_width // 2, self.screen_height // 10)\n\n #If wave 5\n elif self.wave == 5:\n\n #Spawn 2 scout\n self._spawn_scout(self.screen_width // 3, self.screen_height // 10)\n self._spawn_scout(self.screen_width // (3/2), self.screen_height // 10)\n\n #If wave 7\n elif self.wave == 7:\n\n #Spawn 3 scout\n self._spawn_scout(self.screen_width // 4, self.screen_height // 10)\n self._spawn_scout(self.screen_width // 2, self.screen_height // 10)\n self._spawn_scout(self.screen_width // (4/3), self.screen_height // 10)",
"def update_waveforms(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n self.trace_lines[0].set_ydata(self.pv_monitor.arrays[key][0])\n self.trace_lines[1].set_ydata(self.pv_monitor.arrays[key][1])\n self.draw()",
"def give_wv_units(wave):\n if not hasattr(wave, 'unit'):\n uwave = u.Quantity(wave, unit=u.AA)\n elif wave.unit is None:\n uwave = u.Quantity(wave, unit=u.AA)\n else:\n uwave = u.Quantity(wave)\n\n return uwave",
"def dumbSnake_burst_window(self,xStart,xEnd,yDelta, nRoundTrips, sweepTime,windowlist):#for burst mode\n #windowList = np.zeros([numYwindow,numXwindow],dtype=object)\n \n self.sam_x.umv(xStart)\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n # looping through n round trips\n for j in (windowList):\n self.sam_y.umv(windowList)\n self.sam_y.wait()\n print('Windos position %f'%(self.sam_w.wm()))\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i+1))\n self.sam_x.mv(xEnd)\n sleep(0.05)\n seq.start()#start sequence Need to be set \n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1)#wait for turning around \n self.sam_x.mv(xStart)\n sleep(0.05)\n #pp.open()\n seq.start()#start sequence \n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n sleep(1)\n except:\n print('round trip %d didn not end happily' % i)\n daq.end_run()\n daq.disconnect()",
"def config_step_sweep(self):\n self.write(\":SOUR:FREQ:MODE SWE;\"\n \":SOUR:SWE:GEN STEP;\"\n \":SOUR:SWE:MODE AUTO;\")",
"def get_waveform_halfwidth(waveform, sampling_rate=30000.):\n w = resample(waveform,200)#upsample to smooth the data\n time = np.linspace(0,len(waveform)/sampling_rate,200)\n trough = np.where(w==np.min(w))[0][0]\n peak = np.where(w==np.max(w))[0][0]\n \n #dur = time[trough:][np.where(w[trough:]==np.max(w[trough:]))[0][0]] - time[trough]\n if w[peak] > np.abs(w[trough]):\n dur = time[peak:][np.where(w[peak:]>=0.5*np.min(w[peak:]))[0][0]] - time[peak] \n else:\n dur = time[trough:][np.where(w[trough:]<=0.5*np.max(w[trough:]))[0][0]] - time[trough] \n if peak<trough:\n dur=-dur\n return dur"
]
| [
"0.51027554",
"0.49448773",
"0.48653984",
"0.48416498",
"0.48164257",
"0.48011476",
"0.47285727",
"0.46642175",
"0.46641108",
"0.46604708",
"0.4649817",
"0.46433756",
"0.4642581",
"0.45872566",
"0.4520838",
"0.450811",
"0.44464996",
"0.44350323",
"0.44100547",
"0.44029313",
"0.43993545",
"0.43689683",
"0.43662307",
"0.4359684",
"0.4345432",
"0.4320815",
"0.43120494",
"0.4308438",
"0.43080753",
"0.42996007"
]
| 0.7523045 | 0 |
Return dictionary of servo states or "False" if the qSVO command is not supported. | def getservo(pidevice, axes):
if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):
raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)
axes = getaxeslist(pidevice, axes)
if not axes:
return {}
if pidevice.HasqSVO():
return pidevice.qSVO(axes)
return dict(list(zip(axes, [False] * len(axes)))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def servo_codes(self):\r\n return self._arm.servo_codes",
"def setservo(pidevice, axes, states=None, toignore=None, **kwargs):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n if not pidevice.HasSVO():\n return False\n if not axes:\n return True\n axes, states = getitemsvaluestuple(axes, states)\n if pidevice.HasRNP():\n axestorelax = [axis for axis, state in list(getservo(pidevice, axes).items()) if not state]\n if axestorelax:\n pidevice.RNP(axestorelax, [0.0] * len(axestorelax))\n waitonready(pidevice, **kwargs)\n eaxaxes = [axes[i] for i in range(len(axes)) if states[i]]\n enableaxes(pidevice, axes=eaxaxes, **kwargs)\n success = True\n toignore = [] if toignore is None else toignore\n toignore = [toignore] if not isinstance(toignore, list) else toignore\n toignore += [gcserror.E5_PI_CNTR_MOVE_WITHOUT_REF_OR_NO_SERVO, gcserror.E23_PI_CNTR_ILLEGAL_AXIS]\n for i, axis in enumerate(axes):\n try:\n pidevice.SVO(axis, states[i])\n except GCSError as exc: # no GCSRaise() because we want to log a warning\n if exc in toignore:\n debug('could not set servo for axis %r to %s: %s', axis, states[i], exc)\n success = False\n else:\n raise\n waitonready(pidevice, **kwargs)\n return success",
"async def get_hw_switch_states(self) -> Dict[str, bool]:\n raise NotImplementedError",
"def state(self):\n return {\n 'port' : self.self.__port.port, #port name/number as set by the user\n 'baudrate' : self.self.__port.baudrate, #current baudrate setting\n 'bytesize' : self.self.__port.bytesize, #bytesize in bits\n 'parity' : self.self.__port.parity, #parity setting\n 'stopbits' : self.self.__port.stopbits, #stop bit with (1,2)\n 'timeout' : self.self.__port.timeout, #timeout setting\n 'xonxoff' : self.self.__port.xonxoff, #if Xon/Xoff flow control is enabled\n 'rtscts' : self.self.__port.rtscts #if hardware flow control is enabled\n }",
"def status(self):\n\n # --- get 0 padded string representation of status register\n response = self.send_lens_cmd(['90', 'B9', '00'], fast_mode=True)\n state_str = bin(int('0x' + response['MISO'][2], 16))\n state_str = state_str[2:]\n for p in range(8 - len(state_str)):\n state_str = '0' + state_str\n\n self._status = dict(AF_switch=bool(int(state_str[0])),\n F_move=bool(int(state_str[5])),\n F_acc=bool(int(state_str[2])),\n FD_endStop=bool(int(state_str[3])),\n status_byte=state_str)\n\n return self._status",
"def status(self,axis):\n \n if not self.enabled:\n return (False,False)\n \n enabled = True\n self.send_cmd(axis, ' PRINT MVG')\n\n flag = self.ser.read(100)\n moving = True\n \n if flag[:4] == b'FALS': \n moving = False\n elif flag[:4] == b'TRUE':\n moving = True\n\n non_moving = not moving\n return (enabled, non_moving)",
"def get_terminal_observing_states(self):\n pass",
"def _get_state(self):\n print(\"GET STATE\")\n res = self._send_command(\n \"RS;\",\n fb_required=True,\n res_pattern=\"STATE:\")\n # The received answer is supposed to be something like\n # STATE:0|1|-1\n state = int(res.split(':')[1])\n if state == PVDriver.IDLE:\n return \"IDLE\"\n elif state == PVDriver.MOVING:\n return \"MOVING\"\n else:\n return \"ERROR\"",
"def get_system_state(self):\n byte = self.system_state\n return {\n 'chksum': bool(byte & (1 << 6)),\n 'ack': bool(byte & (1 << 4)),\n 'FPGAboot': bool(byte & (1 << 2)),\n 'FPGArun': bool(byte & (1 << 1)),\n 'FPGAcom': bool(byte & (1 << 0)),\n }",
"def output(self):\n match = re.search('STS1=(\\d+)', self.ask('OC;'))\n status = int(match.groups(0)[0])\n isOn = status & 16\n if isOn:\n isOn = True\n else:\n isOn = False\n self.notify('output', isOn)\n return isOn",
"def motor_enable_states(self):\r\n return self._arm.motor_enable_states",
"def getMouvementSteps(Servos):\n\tdictPosition = {} # Cree un dictionnaire vide\n\tprogMode(False) # Desactive le couple des servos\n\n\tfor servo in Servos: # Pour chaque servo\n\t\t# Ajoute la position du servo au dico\n\t\tdictPosition[servo] = axDriver.getPosition(servo)\n\t\ttime.sleep(0.001) # Attends un petit peu pour eviter le timeout du servo\n\n\treturn dictPosition # Retourne le dictionnaire",
"def get_obs_dict(self):\n arm_state = self.robot.get_state('arm')\n gripper_state = self.robot.get_state('gripper')\n # obj_state = self.robot.get_state('object')\n obs_dict = collections.OrderedDict((\n ('t', self.robot.time),\n ('qp', np.concatenate([gripper_state.qpos])),\n ('qv', np.concatenate([gripper_state.qvel])),\n ('obj_qp', self.sim.data.qpos[-self.N_DOF_OBJ:]),\n ('mocap_pos', self.sim.data.mocap_pos.copy()),\n ('mocap_quat', self.sim.data.mocap_quat.copy()),\n ('goal', self.goal),\n ))\n return obs_dict",
"def status(self):\n return self._bp.get_motor_status(self._port)",
"def get_status():\n return ('off', 'off')",
"def get_state(self) -> Dict[str, Any]:\n return {\"aq_potential_num\": self.aq_potential_num, \"wq_potential_num\": self.wq_potential_num}",
"def send_states(self):\n\n teleop_enabled_msg = Bool()\n teleop_enabled_msg.data = self.teleop_enabled\n\n assisted_driving_enabled_msg = Bool()\n assisted_driving_enabled_msg.data = self.assisted_driving_enabled\n\n self.teleop_enabled_pub.publish(teleop_enabled_msg)\n self.assisted_driving_enabled_pub.publish(assisted_driving_enabled_msg)",
"def store_servo_state(self, message):\n id_data = []\n angle_data = []\n for servo_motor_message in message.servos:\n id_data.append(servo_motor_message.id)\n angle_data.append(servo_motor_message.angle)\n self.servo_ids = id_data\n self.servo_angles = angle_data",
"def get_outputs(self):\n\n return {\n 'enable_stamp': self.enable_cbx.isChecked()\n }",
"def _stateDict(self):\n\n data = {}\n # if self.currentState[4]:\n # data['action'] = 'BRAK'\n # else:\n data['action'] = 'MCTL'\n data['speed'] = float(self.speed)\n data['steerAngle'] = float(self.steering_angle)\n\n return data",
"def status(self) -> dict:\n return {\"volume\": self.volume, \"mute\": self.mute}",
"def get_roof_status_from_tcs():\n\t\n\ttarget = send_command('getstatus dome')\n\tsplit_ans = target.split()\n\t\n\treturn split_ans",
"def servo_read_all(self):\n msg = b'\\x25\\x00'\n parameter_len = 16\n ans = self.__bt.read(msg, parameter_len)\n if ans is not None:\n return [x for x in ans]\n return None",
"def get_hw_switch_states(self):\n hw_states = dict()\n #k = self._kp.keypad()\n k = \"\"\n for number, sw in self.switches.items():\n if number == k:\n hw_states[number] = 1\n else:\n hw_states[number] = 0\n return hw_states",
"def setupStatus(self):\n str = \"%s,\\tpv %s\\n\" % (self.name,self.pvname)\n #check encoder setup:\n # if EQ, check if encoded, backlash\n # if EE, check if encoded, deadband\n try:\n partNr = Pv.get('%s.PN'%self.pvname)\n except:\n pass\n if Pv.get(\"%s.SM\"%self.pvname)==0:\n str+= 'Stall Mode: Stop on Stall\\n'\n else:\n str+= 'Stall Mode: No Stop\\n'\n str+='Limits: '\n if Pv.get(\"%s.S1\"%self.pvname)==0:\n str+='\\t 1 not set up'\n if Pv.get(\"%s.S2\"%self.pvname)==0:\n str+='\\t 2 not set up'\n str+=' \\n'\n\n str+='Motor Speed: %f (%f turns/sec)\\n'%(self.get_par('slew_speed'), self.get_par('s_speed'))\n str+='\\t accel. from %f in %f sec\\n'%(self.get_par('base_speed'), self.get_par('acceleration'))\n sbas = self.get_par('s_base_speed')\n if (Pv.get('%s.BS'%self.pvname)==sbas):\n Pv.put(('%s.BS'%self.pvname),self.get_par('s_speed'))\n if (Pv.get('%s.HS'%self.pvname)==sbas):\n Pv.put(('%s.HS'%self.pvname),sbas*1.01)\n\n rdbd = self.get_par('retry_deadband')\n enc = self.get_par('encoder')\n if partNr.find('EQ'):\n if enc==0:\n res = self.get_par('resolution')\n str+='Internally Encoded motor running unencoded with step size of %g\\n'%res\n else:\n res = self.get_par('encoder_step')\n str+='Internally Encoded motor running encoded with enc. step size of %g\\n'%res\n if self.get_par('backlash') == 0.:\n str+='No Backlash\\n'\n else:\n str+='Internally encoded motor with backlash correction of %g (%g enc/motor steps, %g turns)\\n'%(self.get_par('backlash'),self.get_par('backlash')/res,self.get_par('backlash')/self.get_par('u_revolutions'))\n if rdbd < res*3:\n str+='EPICS retry deadband of %f tighter than 3 encoder/motor steps.\\n'%rdbd\n elif rdbd > res*50:\n str+='EPICS retry deadband of %f (%g steps, %g motor turns), will affect wait function'%(rdbd, rdbd/res, rdbd/self.get_par('u_revolutions'))\n elif partNr.find('EE'):\n if enc!=0:\n res = self.get_par('encoder_step')\n str+='Motor using an external encoder with resolution of %g!'%res\n if self.get_par('backlash') != 0.:\n str+='??? closed loop motor with backlash correction???\\n'\n else:\n res = self.get_par('resolution')\n str+='Motor running open loop with resolution of %g!'%res\n if self.get_par('backlash') == 0.:\n str+='no backlash correctio!n\\n'\n else:\n str+='backlash correction of %g\\n'%(self.get_par('backlash')/res)\n if rdbd < res*3:\n str+='EPICS retry deadband tighter than 3 encoder/motor steps.'\n elif rdbd > res*50:\n str+='EPICS retry deadband of %f (%g steps, %g motor turns), will affect wait function'%(rdbd, rdbd/res, rdbd/self.get_par('u_revolutions'))\n \n return str",
"def read_output_status(self):\n function_string = 'OP' + self.output + '?'\n return self.scpi_comm(function_string)",
"def get_gesture_status(self):\n status_dict = dict()\n status = self.read_byte_data(APDS_9960.GESTURE_STATUS_REG_ADDRESS)\n status_dict[\"Gesture FIFO Overflow\"] = bool(status & 0x02)\n status_dict[\"Gesture FIFO Data\"] = bool(status & 0x01)\n return status_dict",
"def status(self):\n status = {\n 'driver':self.driver.status,\n 'arduino':self.arduino.status,\n 'sensors':[]}\n\n for name, sensor in self.sensors.items():\n sensor_status = sensor.status\n sensor_status['name'] = name\n status['sensors'].append(sensor_status)\n\n return status",
"def check_relay_status():\n \n query_cmd_packet = b'\\x04\\x18\\x00\\x00\\x00\\x1b\\x0f'\n ser_relay.write(query_cmd_packet)\n time.sleep(1)\n resp_array = array('B', ser_relay.read(7))\n \n return resp_array",
"def servo_active(self, *args, **kwargs) -> Any:\n pass"
]
| [
"0.5569695",
"0.5473434",
"0.54679066",
"0.5196154",
"0.51944697",
"0.5161103",
"0.5157473",
"0.5153603",
"0.5144221",
"0.51405114",
"0.50765646",
"0.50393516",
"0.49805614",
"0.49218124",
"0.49120718",
"0.48817956",
"0.4870195",
"0.4847555",
"0.48379037",
"0.48323292",
"0.48246977",
"0.47996983",
"0.4777685",
"0.47746658",
"0.47343594",
"0.47322202",
"0.4731799",
"0.46941945",
"0.4682508",
"0.46770707"
]
| 0.574958 | 0 |
Return dictionary of on-target states for open- or closed-loop 'axes'. If qOSN is not supported, open-loop axes will return True. | def ontarget(pidevice, axes):
if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):
raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)
axes = getaxeslist(pidevice, axes)
if not axes:
return {}
servo = getservo(pidevice, axes)
closedloopaxes = [axis for axis in axes if servo[axis]]
openloopaxes = [axis for axis in axes if not servo[axis]]
isontarget = {}
if closedloopaxes:
if pidevice.HasqONT():
isontarget.update(pidevice.qONT(closedloopaxes))
elif pidevice.HasIsMoving():
ismoving = pidevice.IsMoving(closedloopaxes).values()
isontarget.update(dict(list(zip(closedloopaxes, [not x for x in ismoving]))))
if openloopaxes:
if pidevice.HasqOSN():
stepsleft = pidevice.qOSN(openloopaxes).values()
isontarget.update(dict(list(zip(openloopaxes, [x == 0 for x in stepsleft]))))
else:
isontarget.update(dict(list(zip(openloopaxes, [True] * len(openloopaxes)))))
return isontarget | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def axes_coupled(robot):\n\n target_ctrl_path = get_target_ctrl_path(robot)\n attr_name = 'axisCoupling'\n\n attr_path = target_ctrl_path + '.' + attr_name\n\n if not pm.objExists(attr_path):\n return False\n else:\n return pm.getAttr(attr_path)",
"def axes_active(self) -> np.ndarray: # array[Axes]\n return self.axes.flat[:self.n_plots]",
"def get_toggle_axes_args(self,comArgs):\n params, flags = self.get_params(comArgs)\n args = [flags, False]\n return args",
"def _get_axis_state_ps90(self, control_unit: int, axis: int) -> Tuple[Union[int, bool, str]]:\n control_unit = ctypes.c_long(control_unit)\n axis = int(axis)\n axis = ctypes.c_long(axis)\n sleep(time_ps_delay)\n res = self.lib.PS90_GetAxisState(control_unit, axis)\n error = self.__get_read_error_ps90(control_unit)\n if error != 0:\n res = False\n return res, self._error_OWIS_ps90(error, 1)",
"def get_state(self):\n state_dict = OrderedDict()\n for key, target_object in self._map.items():\n state = self._get_single_state(target_object)\n if state is not None:\n # pushbuttons for example are not defined in the get function\n state_dict[key] = state\n return state_dict",
"def axes_enabled(self):\n if hasattr(self, 'axes_widget'):\n return bool(self.axes_widget.GetEnabled())\n return False",
"def getOpenStates(self, objecttype=None):\n return config.OPEN_STATES",
"def getAxesOperations(self):\n \n axisOpsDict = {}\n for axis in self.axisWidgets:\n op = str(axis.getAxisOperationsButton().text()).strip()\n axisOpsDict[axis.getID()] = op\n \n return axisOpsDict",
"def _apply_toffoli(self, state, axes, **kwargs):\n cntrl_max = np.argmax(axes[:2])\n cntrl_min = cntrl_max ^ 1\n ndim = self._ndim(state)\n sl_a0 = _get_slice(0, axes[cntrl_max], ndim)\n sl_a1 = _get_slice(1, axes[cntrl_max], ndim)\n sl_b0 = _get_slice(0, axes[cntrl_min], ndim - 1)\n sl_b1 = _get_slice(1, axes[cntrl_min], ndim - 1)\n\n # If both controls are smaller than the target, shift the target axis down by two. If one\n # control is greater and one control is smaller than the target, shift the target axis\n # down by one. If both controls are greater than the target, leave the target axis as-is.\n if axes[cntrl_min] > axes[2]:\n target_axes = [axes[2]]\n elif axes[cntrl_max] > axes[2]:\n target_axes = [axes[2] - 1]\n else:\n target_axes = [axes[2] - 2]\n\n # state[sl_a1][sl_b1] gives us all of the amplitudes with a |11> for the two control qubits.\n state_x = self._apply_x(state[sl_a1][sl_b1], axes=target_axes)\n state_stacked_a1 = self._stack([state[sl_a1][sl_b0], state_x], axis=axes[cntrl_min])\n return self._stack([state[sl_a0], state_stacked_a1], axis=axes[cntrl_max])",
"def configure_states_discovery(state_options, ode):\n out_meta = ode.get_io_metadata(iotypes='output', metadata_keys=['tags'],\n get_remote=True)\n\n for name, meta in out_meta.items():\n tags = meta['tags']\n prom_name = meta['prom_name']\n state = None\n for tag in sorted(tags):\n\n # Declared as rate_source.\n if tag.startswith('dymos.state_rate_source:') or tag.startswith('state_rate_source:'):\n state = tag.rpartition(':')[-1]\n if tag.startswith('state_rate_source:'):\n msg = f\"The tag '{tag}' has a deprecated format and will no longer work in \" \\\n f\"dymos version 2.0.0. Use 'dymos.state_rate_source:{state}' instead.\"\n om.issue_warning(msg, category=om.OMDeprecationWarning)\n if state not in state_options:\n state_options[state] = StateOptionsDictionary()\n state_options[state]['name'] = state\n\n if state_options[state]['rate_source'] is not None:\n if state_options[state]['rate_source'] != prom_name:\n raise ValueError(f\"rate_source has been declared twice for state \"\n f\"'{state}' which is tagged on '{name}'.\")\n\n state_options[state]['rate_source'] = prom_name\n\n # Declares units for state.\n if tag.startswith('dymos.state_units:') or tag.startswith('state_units:'):\n tagged_state_units = tag.rpartition(':')[-1]\n if tag.startswith('state_units:'):\n msg = f\"The tag '{tag}' has a deprecated format and will no longer work in \" \\\n f\"dymos version 2.0.0. Use 'dymos.{tag}' instead.\"\n om.issue_warning(msg, category=om.OMDeprecationWarning)\n if state is None:\n raise ValueError(f\"'{tag}' tag declared on '{prom_name}' also requires \"\n f\"that the 'dymos.state_rate_source:{tagged_state_units}' \"\n f\"tag be declared.\")\n state_options[state]['units'] = tagged_state_units\n\n # Check over all existing states and make sure we aren't missing any rate sources.\n for name, options in state_options.items():\n if options['rate_source'] is None:\n raise ValueError(f\"State '{name}' is missing a rate_source.\")",
"def event_in_axes(self, event: matplotlib.backend_bases.LocationEvent) \\\n -> bool:\n return event.inaxes == self.subplot",
"def get_observation_verbose(self):\n state = {}\n for grid_id, grid in self.grids.items():\n o = grid.get_active_orders(self.city_time)\n d = list(grid.get_idle_drivers().values())\n state[grid_id] = [o,d]\n return state",
"def get_buttons_state(self):\n return joy_to_xbox(self.xbox_listener.get())",
"def conditional_modes(mu_yx, x_q, yv_min, yv_max, n_modes=10):\n def modes_at(x_query):\n \"\"\"\n Find the modes at the embedding conditioned at x.\n\n Parameters\n ----------\n x_query : numpy.ndarray\n A query point (1, d_x)\n\n Returns\n -------\n numpy.ndarray\n The mode locations (n_modes, d_y)\n \"\"\"\n return multiple_modes(lambda y_q: mu_yx(y_q, x_query), yv_min, yv_max,\n n_modes=n_modes)\n\n # This computes the modes at all query points\n # Size: (n_q, n_modes, n_dims)\n y_modes = np.array([modes_at(x) for x in x_q[:, np.newaxis]])\n\n # This returns the corresponding input coordinates for each mode\n # Size: (n_q, n_modes, n_dims)\n x_modes = np.repeat(x_q[:, np.newaxis], n_modes, axis=1)\n\n # Return the modes\n # Size: (n_q, n_modes, d_x)\n # Size: (n_q, n_modes, d_y)\n return x_modes, y_modes",
"def derive_logical_axes(self, optimizer_state, param_logical_axes):\n optimizer_logical_axes = jax.tree_map(lambda x: None,\n optimizer_state.state_dict())\n optimizer_logical_axes['target'] = param_logical_axes\n\n def factor_rule(logical_axes, adafactor_leaf):\n return dict(\n v_row=None,\n v_col=None,\n v=logical_axes if adafactor_leaf['v'].shape != (1,) else None,\n m=logical_axes if self.hyper_params.beta1 else None)\n\n optimizer_logical_axes['state']['param_states'] = jax.tree_map(\n factor_rule, unfreeze(param_logical_axes),\n optimizer_state.state_dict()['state']['param_states'])\n\n return optimizer_state.restore_state(unfreeze(optimizer_logical_axes))",
"def _go_to_axes(self, session, el=None, az=None, third=None):\n move_defs = []\n for axis_name, short_name, target in [\n ('Azimuth', 'az', az),\n ('Elevation', 'el', el),\n ('Boresight', 'third', third),\n ]:\n if target is not None:\n move_defs.append(\n (short_name, self._go_to_axis(session, axis_name, target)))\n if len(move_defs) is None:\n return True, 'No motion requested.'\n\n moves = yield DeferredList([d for n, d in move_defs])\n all_ok, msgs = True, []\n for _ok, result in moves:\n if _ok:\n all_ok = all_ok and result[0]\n msgs.append(result[1])\n else:\n all_ok = False\n msgs.append(f'Crash! {result}')\n\n if all_ok:\n msg = msgs[0]\n else:\n msg = ' '.join([f'{n}: {msg}' for (n, d), msg in zip(move_defs, msgs)])\n return all_ok, msg",
"def get_obs_dict(self):\n arm_state = self.robot.get_state('arm')\n gripper_state = self.robot.get_state('gripper')\n # obj_state = self.robot.get_state('object')\n obs_dict = collections.OrderedDict((\n ('t', self.robot.time),\n ('qp', np.concatenate([gripper_state.qpos])),\n ('qv', np.concatenate([gripper_state.qvel])),\n ('obj_qp', self.sim.data.qpos[-self.N_DOF_OBJ:]),\n ('mocap_pos', self.sim.data.mocap_pos.copy()),\n ('mocap_quat', self.sim.data.mocap_quat.copy()),\n ('goal', self.goal),\n ))\n return obs_dict",
"def allAxes( mv ):\n if mv is None: return None\n return mv.getAxisList()",
"def twin_axes (self):\n return self._twin_axes",
"def all_open(pins, sliders):\n for pin in pins:\n for slider in sliders:\n if not open_at(pin, slider):\n return False\n return True",
"def getOpenWorkflowStates(self):\n return OPEN_STATES",
"def axes_inactive(self) -> np.ndarray:\n return self.axes.flat[self.n_plots:]",
"def get_obs_dict(self) -> Dict[str, np.ndarray]:\n robot_state = self.robot.get_state('dkitty')\n torso_track_state = self.tracker.get_state('torso')\n\n return collections.OrderedDict((\n # Add observation terms relating to being upright.\n *self._get_upright_obs(torso_track_state).items(),\n ('root_pos', torso_track_state.pos),\n ('root_euler', torso_track_state.rot_euler),\n ('root_vel', torso_track_state.vel),\n ('root_angular_vel', torso_track_state.angular_vel),\n ('kitty_qpos', robot_state.qpos),\n ('kitty_qvel', robot_state.qvel),\n ('last_action', self._get_last_action()),\n ('pose_error', self._desired_pose - robot_state.qpos),\n ))",
"def get_observation_driver_state(self):\n next_state = np.zeros(self.n_grids)\n grids = list(self.grids.values())\n for idx, grid in enumerate(grids):\n if grid is not None:\n next_state[idx] = grid.get_idle_driver_numbers_loop()\n return next_state",
"def is_active(self):\n group_names = self.get_var(\"group_names\", default=[])\n master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names\n return super(OvsVersion, self).is_active() and master_or_node",
"def axesnames(self):\n return self._axesnames",
"def _isopen(self):\n return self.dp.state()==PyTango.DevState.OPEN",
"def config_independent_frames(self):\n return {'standard': 'dispname', 'bias': None, 'dark': None}",
"def config_independent_frames(self):\n return {'standard': 'dispname','bias': None, 'dark': None}",
"def IsOpenYmax(self, *args):\n return _Bnd.Bnd_Box_IsOpenYmax(self, *args)"
]
| [
"0.52759826",
"0.51507485",
"0.49435332",
"0.48457035",
"0.48273617",
"0.47947535",
"0.47759688",
"0.47717133",
"0.4736952",
"0.47318158",
"0.4717859",
"0.471497",
"0.46966356",
"0.4686796",
"0.4681813",
"0.4674398",
"0.46723193",
"0.46713328",
"0.46647486",
"0.46268576",
"0.46147096",
"0.45987737",
"0.4585477",
"0.45670706",
"0.45342416",
"0.45328343",
"0.4515862",
"0.44854215",
"0.44727746",
"0.44711477"
]
| 0.5636245 | 0 |
Wait until referencing of 'axes' is finished or timeout. | def waitonreferencing(pidevice, axes=None, timeout=300, predelay=0, postdelay=0, polldelay=0.1):
if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):
raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)
axes = getaxeslist(pidevice, axes)
if not axes:
return
waitonready(pidevice, timeout=timeout, predelay=predelay, polldelay=polldelay)
maxtime = time() + timeout
if pidevice.devname in ('C-843',):
pidevice.errcheck = False
while not all(list(pidevice.qFRF(axes).values())):
if time() > maxtime:
stopall(pidevice)
raise SystemError('waitonreferencing() timed out after %.1f seconds' % timeout)
sleep(polldelay)
if pidevice.devname in ('C-843',):
pidevice.errcheck = True
sleep(postdelay) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )",
"def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None",
"def waitontarget(pidevice, axes=None, timeout=300, predelay=0, postdelay=0, polldelay=0.1):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n axes = getaxeslist(pidevice, axes)\n if not axes:\n return\n waitonready(pidevice, timeout=timeout, predelay=predelay, polldelay=polldelay)\n if not pidevice.HasqONT():\n return\n servo = getservo(pidevice, axes)\n axes = [x for x in axes if servo[x]]\n maxtime = time() + timeout\n while not all(list(pidevice.qONT(axes).values())):\n if time() > maxtime:\n raise SystemError('waitontarget() timed out after %.1f seconds' % timeout)\n sleep(polldelay)\n sleep(postdelay)",
"def done(self):\n if not self._isSubplot:\n raise Exception(\"You are not in a subplotting context!\")\n self.__exit__()",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def referencewait(self):\n debug('ControllerStartup.referencewait()')\n if not self.refmodes or self.prop['skipref']:\n return\n self._databuf['servobuf'] = getservo(self.pidevice, self.pidevice.axes)\n toreference = {} # {cmd: [axes]}\n for i, refmode in enumerate(self._refmodes[:self.pidevice.numaxes]):\n if not refmode:\n continue\n axis = self.pidevice.axes[i]\n refmode = refmode.upper()\n if refmode not in toreference:\n toreference[refmode] = []\n if self._isreferenced(refmode, axis):\n debug('axis %r is already referenced by %r', axis, refmode)\n else:\n toreference[refmode].append(self.pidevice.axes[i])\n waitonaxes = []\n for refmode, axes in toreference.items():\n if not axes:\n continue\n if refmode == 'POS':\n self._ref_with_pos(axes)\n elif refmode == 'ATZ':\n self._autozero(axes)\n else:\n self._ref_with_refcmd(axes, refmode)\n waitonaxes += axes\n waitonreferencing(self.pidevice, axes=waitonaxes, **self._kwargs)",
"def update_plot(axes):\n axes.clear()\n\n i = C.i\n C.i += di # globale Zählvariable erhöhen\n if C.i >= len(tt):\n time.sleep(2)\n C.i = 0\n\n t = tt[i]\n q1 = qq1[i]\n q2 = qq2[i]\n q3 = qq3[i]\n CCframe(q1, q2, q3)\n\n # Ausgabe der aktuellen Zeit\n pl.text(0.06, 0.05, \"t = %3.2fs\" % t, transform = axes.transAxes)\n pl.axis([-3, 3, -3, 3])\n axes.figure.canvas.draw()",
"def wait(self):\n pass",
"def wait(self):\n pass",
"def wait(self):\n self.mainloop().wait()",
"def wait(self, axis, timeout=10):\n if not self.enabled:\n return\n\n # Wait for the motor to stop moving\n moving = True\n seconds = int(round(time.time() * 1000))\n \n # check moving flag\n while moving:\n time.sleep(0.01)\n flags = self.status(axis)\n if (flags[0] and flags[1])==True:\n moving = False\n return False\n else: # Timeout\n moving = True\n if timeout == -1:\n pass\n elif (int(round(time.time() * 1000))-seconds)/1000 > timeout:\n return True",
"def on_axes_update(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update axes\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.chart_list[i].setXRange(self.worker.start_range,\n self.worker.samples_count + NUM_GUI_SAMPLES, padding=0.075)\n\n # for i, series in enumerate(self.measurements_list):\n #\n # # An optimization to prevent unnecessary rendering\n # if i == tab_open:\n #\n # # Remove old x-axis\n # series.detachAxis(self.xaxis_list[i])\n # self.chart_list[i].chart().removeAxis(self.xaxis_list[i])\n # self.xaxis_list[i] = QValueAxis()\n #\n # # Add new x-axis\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.xaxis_list[i].setRange(self.worker.samples_count, self.worker.samples_count +\n # NUM_GUI_SAMPLES)\n # series.attachAxis(self.xaxis_list[i])",
"def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")",
"def waitonoma(pidevice, axes=None, timeout=300, predelay=0, polldelay=0.1):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n axes = getaxeslist(pidevice, axes)\n if not axes:\n return\n numsamples = 5\n positions = []\n maxtime = time() + timeout\n waitonready(pidevice, timeout=timeout, predelay=predelay, polldelay=polldelay)\n while True:\n positions.append(list(pidevice.qPOS(axes).values()))\n positions = positions[-numsamples:]\n if len(positions) < numsamples:\n continue\n isontarget = True\n for vals in zip(*positions):\n isontarget &= sum([abs(vals[i] - vals[i + 1]) for i in range(len(vals) - 1)]) < 0.01\n if isontarget:\n return\n if time() > maxtime:\n stopall(pidevice)\n raise SystemError('waitonoma() timed out after %.1f seconds' % timeout)\n sleep(polldelay)",
"def wait(self):\n self.event.wait()",
"def waitonphase(pidevice, axes=None, timeout=300, predelay=0, postdelay=0, polldelay=0.1):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n axes = getaxeslist(pidevice, axes)\n if not axes:\n return\n waitonready(pidevice, timeout=timeout, predelay=predelay, polldelay=polldelay)\n maxtime = time() + timeout\n while not all([x > -1.0 for x in pidevice.qFPH(axes).values()]):\n if time() > maxtime:\n raise SystemError('waitonphase() timed out after %.1f seconds' % timeout)\n sleep(polldelay)\n sleep(postdelay)",
"def finalize(self):\n self.thread.quit()\n self.color.release()\n self.pos.release()\n\n if self.initCoordinates.f_timer is not None:\n for f_timer in self.initCoordinates.f_timer:\n self.timer.addFunctionTimer(f_timer)\n if self.numMethod.f_timer is not None:\n for f_timer in self.numMethod.f_timer:\n self.timer.addFunctionTimer(f_timer)",
"def show_fig_and_wait(self):\n\n # window management\n self.fig.canvas.manager.show()\n self.fig.canvas.draw_idle()\n # starting a 'blocking' loop to let the user interact\n self.fig.canvas.start_event_loop(timeout=-1)",
"def wait_observation_time(self, time: int) -> None:\n var = tk.IntVar()\n self.master.after(time * 1000, var.set, 1)\n print(\"waiting...\")\n self.master.wait_variable(var)",
"def wait_complete(self):\n self.join()",
"def wait(self, cycles):\n\t\tpass",
"def call_back(self):\n\n # Poll the pipe\n while self.pipe.poll():\n # Look inside of the pipe and take the_box\n the_box = self.pipe.recv()\n\n # If the_box is empty, it's game over\n if the_box is None:\n self.terminate()\n return False\n\n # Otherwise, update the plot with the tools in the_box\n else:\n # Get our elapsed time\n elapsed_time = time.time() - the_box[0]\n\n # Add the elements to the plot\n self.ax1.plot(elapsed_time, the_box[1], c='tab:orange',\n marker=r'$\\clubsuit$', alpha=0.5,\n markersize=10)\n self.ax1.plot(elapsed_time, the_box[2], c='tab:blue',\n marker=r'$\\clubsuit$', alpha=0.5,\n markersize=10)\n self.ax2.plot(elapsed_time, the_box[3], c='tab:pink',\n marker=r'$\\clubsuit$', alpha=0.5,\n markersize=10)\n\n current_gen = the_box[-2]\n generations = the_box[-1]\n if current_gen == generations - 1:\n x, y = self.get_path_coordinates(the_box[4], the_box[5], the_box[6])\n self.ax3.plot(y, x, c='tab:olive', marker=r'$\\clubsuit$',\n alpha=0.5, markersize=10)\n\n\n # Redraw the canvas\n self.fig.canvas.draw()\n return True",
"def __exit__(self, *args):\n # Do the last (and perhaps only) call's plotting\n self._doPlots()\n self._isSubplot = False\n self.opts.goGlobal()\n if not self.usingAgg:\n self.fig.canvas.mpl_connect('resize_event', self.subplots_adjust)",
"def mock_dear_py_gui():\n def _gui_thread(self):\n while not self.stop:\n _ = self.process_data.get()\n\n BaseRealTimeVisualizer._gui_thread = _gui_thread\n BaseRealTimeVisualizer.should_close = lambda self: False",
"def mpl_plot_was_rendered():\n figs_before = plt.gcf().number\n yield\n figs_after = plt.gcf().number\n assert figs_after > figs_before",
"def wait_for_animation(self):\n # Freeze current element data so we can see if it changes.\n self._freeze_element_data()\n dom = self._get_dom()\n prev_dom = dom\n # Now set a time to see if the content keeps changing.\n # Don't wait for more than 5 seconds for an animation to complete.\n seconds_timeout = 5\n seconds_spent = 0\n seconds_interval = .2\n while seconds_spent < seconds_timeout:\n time.sleep(seconds_interval)\n self._freeze_element_data()\n new_dom = self._get_dom()\n # Do a straight string compare to see if anything is changing in the dom.\n if new_dom == prev_dom:\n break\n prev_dom = new_dom\n seconds_spent += seconds_interval",
"def wait_until_done(self, timeout=10.0):\r\n cfunc = lib_importer.windll.DAQmxWaitUntilTaskDone\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [lib_importer.task_handle, ctypes.c_double]\r\n\r\n error_code = cfunc(self._handle, timeout)\r\n check_for_error(error_code)"
]
| [
"0.5682072",
"0.56637263",
"0.5506973",
"0.54727566",
"0.54668844",
"0.54668844",
"0.54668844",
"0.54668844",
"0.53185624",
"0.5282941",
"0.5264092",
"0.5264092",
"0.52434653",
"0.52239007",
"0.5200853",
"0.51965016",
"0.5195598",
"0.5181763",
"0.5116212",
"0.51093733",
"0.5096917",
"0.50869864",
"0.50584906",
"0.5042291",
"0.5040116",
"0.50396466",
"0.5015087",
"0.5010649",
"0.49918726",
"0.49835587"
]
| 0.5684494 | 0 |
Set servo of 'axes' to 'states'. Calls RNP for openloop axes and waits for servo operation to finish if appropriate. EAX is enabled for closedloop axes. | def setservo(pidevice, axes, states=None, toignore=None, **kwargs):
if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):
raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)
if not pidevice.HasSVO():
return False
if not axes:
return True
axes, states = getitemsvaluestuple(axes, states)
if pidevice.HasRNP():
axestorelax = [axis for axis, state in list(getservo(pidevice, axes).items()) if not state]
if axestorelax:
pidevice.RNP(axestorelax, [0.0] * len(axestorelax))
waitonready(pidevice, **kwargs)
eaxaxes = [axes[i] for i in range(len(axes)) if states[i]]
enableaxes(pidevice, axes=eaxaxes, **kwargs)
success = True
toignore = [] if toignore is None else toignore
toignore = [toignore] if not isinstance(toignore, list) else toignore
toignore += [gcserror.E5_PI_CNTR_MOVE_WITHOUT_REF_OR_NO_SERVO, gcserror.E23_PI_CNTR_ILLEGAL_AXIS]
for i, axis in enumerate(axes):
try:
pidevice.SVO(axis, states[i])
except GCSError as exc: # no GCSRaise() because we want to log a warning
if exc in toignore:
debug('could not set servo for axis %r to %s: %s', axis, states[i], exc)
success = False
else:
raise
waitonready(pidevice, **kwargs)
return success | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enableaxes(self):\n debug('ControllerStartup.enableaxes()')\n if not self.pidevice.HasEAX() or self.prop['skipeax']:\n return\n for axis in self.pidevice.axes:\n try:\n self.pidevice.EAX(axis, True)\n except GCSError as exc:\n if exc != gcserror.E2_PI_CNTR_UNKNOWN_COMMAND:\n raise\n waitonready(self.pidevice, **self._kwargs)",
"def _ref_with_pos(self, axes):\n debug('ControllerStartup._ref_with_pos(axes=%s)', axes)\n assert self.pidevice.HasPOS(), 'controller does not support the POS command'\n self.pidevice.RON(axes, [False] * len(axes))\n self.pidevice.POS(axes, [0.0] * len(axes))\n waitonready(self.pidevice, **self._kwargs)\n self.pidevice.SVO(axes, [True] * len(axes)) # A following qONT will fail if servo is disabled.",
"def setaxesnames(self):\n if not self._axesnames or self.prop['skipsai']:\n return\n debug('ControllerStartup.setaxesnames()')\n oldaxes = self.pidevice.qSAI_ALL()\n for i, newaxis in enumerate(self.axesnames):\n if newaxis != oldaxes[i] or self.prop['forcesai']:\n setstage = False\n if self.pidevice.HasqCST():\n if self.pidevice.qCST()[oldaxes[i]] == 'NOSTAGE':\n try:\n debug('try rename NOSTAGE to TEMP (0x3C)')\n self.pidevice.SPA(oldaxes[i], 0x3c, 'TEMP')\n setstage = True\n except GCSError:\n pass\n self.pidevice.SAI(oldaxes[i], newaxis)\n if setstage:\n self.pidevice.SPA(newaxis, 0x3c, 'NOSTAGE')\n debug('restore NOSTAGE (0x3C)')",
"def init_servos():\n for i in range(0, 7):\n kit.servo[i].actuation_range = 180\n kit.servo[i].set_pulse_width_range(450, 2550)",
"def servo_on(self):\n self.logger.info('Setting servo ON')\n self.electronics.move_servo(1)\n self.config['servo']['status'] = 1",
"def servo_active(self, *args, **kwargs) -> Any:\n pass",
"def drawAxes(t):\r\n t.speed(0)\r\n t.pd()\r\n t.forward(500)\r\n t.back(500)",
"def _autozero(self, axes):\n debug('ControllerStartup._autozero(axes=%s)', axes)\n self.pidevice.ATZ(axes, ['NaN'] * len(axes))\n waitonautozero(self.pidevice, axes, **self._kwargs)\n setservo(self.pidevice, axes, [True] * len(axes), **self._kwargs)\n moveandwait(self.pidevice, axes, [0.0] * len(axes), **self._kwargs)",
"def resetservo(self):\n debug('ControllerStartup.resetservo()')\n if self.servostates is not None:\n setservo(self.pidevice, self.servostates)\n elif self._databuf['servobuf']:\n setservo(self.pidevice, self._databuf['servobuf'])",
"def toggle_axis(self):\n # cycle through three axis states\n states = [False, 'world', 'all']\n # the state after toggling\n index = (states.index(self.view['axis']) + 1) % len(states)\n # update state to next index\n self.view['axis'] = states[index]\n # perform gl actions\n self.update_flags()",
"def turn_on(self):\n # read out the current pose of the robot\n configuration = self.robot.get_all_servo_position()\n\n # interpolate to the default position\n interpolation_time = 3000 # ms\n interpolation_steps = interpolation_time // TIME_FRAME\n\n speed = np.zeros(18)\n for i in range(18):\n speed[i] = (SERVOS_BASE[i] - configuration[i]) / interpolation_steps\n\n # execute the motion\n for t in range(interpolation_steps):\n self.robot.set_all_servo_position(configuration + t * speed)",
"def progMode(state):\n\t# Envoie la commande setTorque a tous les servos\n\taxDriver.setTorque(axDriver.BROADCASTID, state)",
"def _set_pwm(self, raw_values):\n for i in range(len(self._pins)):\n self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])",
"def configure_servo(self, board):\n self.servo = board.get_pin(f\"d:{self.pin}:p\")\n board.servo_config(\n pin = self.pin,\n min_pulse = 544,\n max_pulse = 2400,\n angle = 93\n )",
"def _ref_with_refcmd(self, axes, refmode):\n debug('ControllerStartup._ref_with_refcmd(axes=%s, refmode=%s)', axes, refmode)\n for axis in axes:\n if self.pidevice.HasRON():\n try:\n self.pidevice.RON(axis, True)\n except GCSError as exc:\n if exc == gcserror.E34_PI_CNTR_CMD_NOT_ALLOWED_FOR_STAGE:\n pass # hexapod axis\n else:\n raise\n try:\n getattr(self.pidevice, refmode)(axis)\n except GCSError as exc:\n if exc == gcserror.E5_PI_CNTR_MOVE_WITHOUT_REF_OR_NO_SERVO:\n self._databuf['servobuf'][axis] = getservo(self.pidevice, axis)[axis]\n self.pidevice.SVO(axis, not self._databuf['servobuf'][axis])\n getattr(self.pidevice, refmode)(axis)\n else:\n raise\n if self.pidevice.devname in ('C-843',):\n waitonreferencing(self.pidevice, axes=axis, **self._kwargs)\n waitonready(self.pidevice)",
"def set_axes(self, a):\r\n self.axes = a",
"def set_orientation(self, axes):\n if debug:\n logger.debug('set_orientation ...')\n logger.debug('%s -> %s', str(self.axes_names), str(axes))\n\n if set(axes) != set(self.axes_names):\n raise Exception('Required orientation %s does not contain '\n 'all axes %s' % (str(axes), str(self.axes_names)))\n\n if axes == self.axes_names: # already in the asked orientation\n return\n\n for i, axis in enumerate(axes):\n logger.debug('Rolling axis %s, cur pos=%d -> dest pos=%d',\n axis, self.axes_names.index(axis), i)\n logger.debug('Shape: %s', str(self.data.shape))\n cur_i = self.axes_names.index(axis)\n self.data = np.rollaxis(self.data, cur_i, i)\n self.axes_names.pop(cur_i)\n self.axes_names.insert(i, axis)\n logger.debug('After rolling. Shape: %s, new axes: %s',\n str(self.data.shape), str(self.axes_names))\n logger.debug('')\n\n self.axes_ids = dict([(a, i) for i, a in enumerate(self.axes_names)])",
"def on_axes_update(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update axes\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.chart_list[i].setXRange(self.worker.start_range,\n self.worker.samples_count + NUM_GUI_SAMPLES, padding=0.075)\n\n # for i, series in enumerate(self.measurements_list):\n #\n # # An optimization to prevent unnecessary rendering\n # if i == tab_open:\n #\n # # Remove old x-axis\n # series.detachAxis(self.xaxis_list[i])\n # self.chart_list[i].chart().removeAxis(self.xaxis_list[i])\n # self.xaxis_list[i] = QValueAxis()\n #\n # # Add new x-axis\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.xaxis_list[i].setRange(self.worker.samples_count, self.worker.samples_count +\n # NUM_GUI_SAMPLES)\n # series.attachAxis(self.xaxis_list[i])",
"def __init__(self):\n\n # The Microsoft XBox 360 Wired controller has 11 buttons and 8 axes.\n # Buttons can be 0 (not pressed) or 1 (pressed)\n # Axes are floats and range between -1 and 1. Note that for LT and RT, their \"not pressed\" value is 1 and for the others it is 0. Cross keys only have values -1, 0, and 1. The others have be any value in between -1 and 1.\n num_buttons = 11\n num_axes = 8\n self.inputs = [0 for i in range(num_buttons + num_axes)]\n self.inputs[JoyInput.LT] = self.inputs[JoyInput.RT] = 1\n\n # Dictionary of saved inputs. If an input is not currently saved, you must set it to None.\n # For example, the LS_Y (\"left stick Y\") axis may be saved in self.saved[JoyInput.LS_Y]\n self.saved = {\n JoyInput.LS_Y: None,\n Joystick.RS_ANGLE: None,\n }\n\n # Field variables\n self.depth_state = None # stores the depth state\n self.depth_last_received = 0 # how long since the last depth state callback\n self.depth_pwm_input = 0 # tracks pwm given to depth thrusters\n\n # ROS Subscribers\n rospy.Subscriber(\"/joy\", Joy, self.joy_callback)\n rospy.Subscriber(Topic.YAW_STATE, Float64, self.yaw_state_callback)\n rospy.Subscriber(Topic.DEPTH_STATE, Float64, self.depth_state_callback)\n rospy.Subscriber(Topic.YAW_SETPOINT, Float64, self.yaw_setpoint_callback)\n rospy.Subscriber(Topic.DEPTH_SETPOINT, Int16, self.depth_setpoint_callback)\n\n # ROS Publishers\n # self.topics is a dictionary of dictionaries.\n # 'publisher' contains the rospy.Publisher()\n # 'msg' contains the Int16(), Float64(), or Bool() related to the publisher\n # Use self.publish() rather than using self.topics directly.\n self.topics = {\n Topic.YAW_PWM: {'publisher':rospy.Publisher(Topic.YAW_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PWM_FEEDBACK: {'publisher':rospy.Publisher(Topic.YAW_PWM_FEEDBACK, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PID: {'publisher':rospy.Publisher(Topic.YAW_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.YAW_SETPOINT: {'publisher':rospy.Publisher(Topic.YAW_SETPOINT, Float64, queue_size=10), 'msg':Float64()},\n\n Topic.DEPTH_PWM: {'publisher':rospy.Publisher(Topic.DEPTH_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.DEPTH_PID: {'publisher':rospy.Publisher(Topic.DEPTH_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.DEPTH_SETPOINT: {'publisher':rospy.Publisher(Topic.DEPTH_SETPOINT, Int16, queue_size=10), 'msg':Int16()},\n }",
"def servo_force(self, *args, **kwargs) -> Any:\n pass",
"def reset_pose():\n rospy.wait_for_service('/drl/set_model_state')\n try:\n reset_pose_proxy = rospy.ServiceProxy(\n '/drl/set_model_state', ResetPosition)\n reset_pose_proxy(True)\n except rospy.ServiceException, ex:\n print \"Service call reset_pose failed: %s\" % ex",
"def _apply_toffoli(self, state, axes, **kwargs):\n cntrl_max = np.argmax(axes[:2])\n cntrl_min = cntrl_max ^ 1\n ndim = self._ndim(state)\n sl_a0 = _get_slice(0, axes[cntrl_max], ndim)\n sl_a1 = _get_slice(1, axes[cntrl_max], ndim)\n sl_b0 = _get_slice(0, axes[cntrl_min], ndim - 1)\n sl_b1 = _get_slice(1, axes[cntrl_min], ndim - 1)\n\n # If both controls are smaller than the target, shift the target axis down by two. If one\n # control is greater and one control is smaller than the target, shift the target axis\n # down by one. If both controls are greater than the target, leave the target axis as-is.\n if axes[cntrl_min] > axes[2]:\n target_axes = [axes[2]]\n elif axes[cntrl_max] > axes[2]:\n target_axes = [axes[2] - 1]\n else:\n target_axes = [axes[2] - 2]\n\n # state[sl_a1][sl_b1] gives us all of the amplitudes with a |11> for the two control qubits.\n state_x = self._apply_x(state[sl_a1][sl_b1], axes=target_axes)\n state_stacked_a1 = self._stack([state[sl_a1][sl_b0], state_x], axis=axes[cntrl_min])\n return self._stack([state[sl_a0], state_stacked_a1], axis=axes[cntrl_max])",
"def servo_off(self):\n self.logger.info('Setting servo OFF')\n self.electronics.move_servo(0)\n self.config['servo']['status'] = 0",
"def superpositions(self, states, fps = 30, total_time = 20, **kw):\n\n total_frames = fps * total_time\n from .complex_slider_widget import ComplexSliderWidget\n eigenstates = self.eigenstates.array\n coeffs = None\n get_norm_factor = lambda psi: 1.0/np.sqrt(np.sum(psi*np.conj(psi)))\n animation_data = {'ticks': 0, 'norm': get_norm_factor(eigenstates[0]),\n 'is_paused': False}\n psi0 = eigenstates[0]*get_norm_factor(eigenstates[0])\n if isinstance(states, int) or isinstance(states, float):\n coeffs = np.array([1.0 if i == 0 else 0.0 for i in range(states)],\n dtype=np.complex128)\n eigenstates = eigenstates[0: states]\n else:\n coeffs = states\n eigenstates = eigenstates[0: len(states)]\n states = len(states)\n psi0 = np.tensordot(coeffs, eigenstates, 1)\n animation_data['norm'] = get_norm_factor(psi0)\n psi0 *= animation_data['norm']\n energies = self.eigenstates.energies\n params = {'dt': 0.001, \n 'xlim': [-self.eigenstates.extent/2.0, \n self.eigenstates.extent/2.0],\n 'save_animation': False,\n 'frames': 120\n }\n for k in kw.keys():\n params[k] = kw[k]\n\n plt.style.use(\"dark_background\")\n fig = plt.figure(figsize=(16/9 *5.804 * 0.9,5.804)) \n grid = plt.GridSpec(5, states)\n ax = fig.add_subplot(grid[0:3, 0:states])\n \n ax.set_xlabel(\"[Å]\")\n x = np.linspace(-self.eigenstates.extent/2.0,\n self.eigenstates.extent/2.0,\n len(eigenstates[0]))\n ax.set_yticks([])\n ax.set_xlim(np.array(params['xlim'])/Å)\n\n line1, = ax.plot(x/Å, np.real(eigenstates[0]), label='$Re|\\psi(x)|$')\n line2, = ax.plot(x/Å, np.imag(eigenstates[0]), label='$Im|\\psi(x)|$')\n line3, = ax.plot(x/Å, np.abs(eigenstates[0]), label='$|\\psi(x)|$', color='white')\n ax.set_ylim(-1.7*np.amax(np.abs(psi0)), 1.7*np.amax(np.abs(psi0)))\n ax.legend()\n\n def make_update(n):\n def update(phi, r):\n animation_data['is_paused'] = True\n coeffs[n] = r*np.exp(1.0j*phi)\n psi = np.tensordot(coeffs, eigenstates, 1)\n animation_data['norm'] = get_norm_factor(psi)\n line1.set_ydata(np.real(psi))\n line2.set_ydata(np.imag(psi))\n line3.set_ydata(np.abs(psi))\n return update\n\n widgets = []\n circle_artists = []\n for i in range(states):\n circle_ax = fig.add_subplot(grid[4, i], projection='polar')\n circle_ax.set_title('n=' + str(i) # + '\\nE=' + str() + '$E_0$'\n )\n circle_ax.set_xticks([])\n circle_ax.set_yticks([])\n widgets.append(ComplexSliderWidget(circle_ax, 0.0, 1.0, animated=True))\n widgets[i].on_changed(make_update(i))\n circle_artists.append(widgets[i].get_artist())\n artists = circle_artists + [line1, line2, line3]\n\n def func(*args):\n animation_data['ticks'] += 1\n e = 1.0\n if animation_data['is_paused']:\n animation_data['is_paused'] = False\n else:\n e *= np.exp(-1.0j*energies[0:states]*params['dt'])\n np.copyto(coeffs, coeffs*e)\n norm_factor = animation_data['norm']\n psi = np.tensordot(coeffs*norm_factor, eigenstates, 1)\n line1.set_ydata(np.real(psi))\n line2.set_ydata(np.imag(psi))\n line3.set_ydata(np.abs(psi))\n if animation_data['ticks'] % 2:\n return [line1, line2, line3]\n else:\n for i, c in enumerate(coeffs):\n phi, r = np.angle(c), np.abs(c)\n artists[i].set_xdata([phi, phi])\n artists[i].set_ydata([0.0, r])\n return artists\n a = animation.FuncAnimation(fig, func, blit=True, interval=1000.0/60.0,\n frames=None if (not params['save_animation']) else\n total_frames)\n if params['save_animation'] == True:\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=fps, metadata=dict(artist='Me'), \n bitrate=-1)\n a.save('animation.mp4', writer=writer)\n return\n plt.show()",
"def sendMouvementStep(positions):\n\tprogMode(True) # Active le couple des servos\n\tfor servo in positions: # Pour chaque servo\n\t\t# Ecrit la position dans le registre de chaque servo\n\t\taxDriver.setPosition(servo, positions[servo])\n\taxDriver.action(axDriver.BROADCASTID) # Tous les servos bougent",
"def right_angle_axes(self, right_angle_axes):\n\n self.container['right_angle_axes'] = right_angle_axes",
"def sliderChange(self):\n for rdout, sldr in zip(self.joint_slider_rdouts, self.joint_sliders):\n rdout.setText(str(sldr.value()))\n\n self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + \"%\")\n self.ui.rdoutSpeed.setText(str(self.ui.sldrSpeed.value()) + \"%\")\n\n # Do nothing if the rexarm is not initialized\n if self.rexarm.initialized:\n self.rexarm.set_torque_limits([self.ui.sldrMaxTorque.value() / 100.0] * self.rexarm.num_joints)\n self.rexarm.set_speeds_normalized_all(self.ui.sldrSpeed.value() / 100.0)\n joint_positions = np.array([sldr.value() * D2R for sldr in self.joint_sliders])\n # Only send the joints that the rexarm has\n self.rexarm.set_positions(joint_positions[0:self.rexarm.num_joints])",
"def run(self, num_episodes):\n for _ in xrange(num_episodes):\n self._env.reset()\n curr_state = self._env.state\n while not self._env.is_terminal(curr_state):\n reward = self._policy.take_action_and_get_reward()\n next_state = self._env.state\n self._update_parameters(curr_state, reward, next_state)\n curr_state = next_state\n # Estimate the TD-fixpoint.\n self.theta = np.dot(np.linalg.pinv(self._A), self._b)\n # Calculate current MSVE.\n self._calc_msve()",
"def axesnames(self, axesnames):\n if axesnames is None:\n self._axesnames = None\n else:\n assert isinstance(axesnames, list), 'axesnames must be list'\n self._axesnames = axesnames\n debug('ControllerStartup.axesnames = %s', itemstostr(self._axesnames))",
"def teleopPeriodic(self):\n self.drive.arcadeDrive(1, 0)\n self.brushless.set(1)\n self.spark.set(self.joystick.getY())"
]
| [
"0.56432706",
"0.5405429",
"0.5404202",
"0.5402343",
"0.54009134",
"0.5158554",
"0.50665253",
"0.50475216",
"0.5013823",
"0.49779767",
"0.49649036",
"0.4883331",
"0.48517424",
"0.4835422",
"0.48264295",
"0.48081583",
"0.47999898",
"0.4750046",
"0.47117424",
"0.47070265",
"0.47029904",
"0.47017142",
"0.46984404",
"0.46972162",
"0.46922633",
"0.4689906",
"0.46873426",
"0.46736935",
"0.46603504",
"0.46547043"
]
| 0.6717641 | 0 |
Wait until all 'axes' are on phase. | def waitonphase(pidevice, axes=None, timeout=300, predelay=0, postdelay=0, polldelay=0.1):
if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):
raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)
axes = getaxeslist(pidevice, axes)
if not axes:
return
waitonready(pidevice, timeout=timeout, predelay=predelay, polldelay=polldelay)
maxtime = time() + timeout
while not all([x > -1.0 for x in pidevice.qFPH(axes).values()]):
if time() > maxtime:
raise SystemError('waitonphase() timed out after %.1f seconds' % timeout)
sleep(polldelay)
sleep(postdelay) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findphase(self):\n debug('ControllerStartup.findphase()')\n if not self.pidevice.HasFPH() or self.prop['skipfph']:\n return\n if not self._databuf['cstdone']:\n debug('no need to do find phase for axes %r', self.pidevice.axes)\n return\n for axis in self._databuf['cstdone']:\n if self.pidevice.qFRF(axis)[axis]:\n self.pidevice.FPH(axis)\n waitonphase(self.pidevice, **self._kwargs)\n self.pidevice.WPA()\n else:\n info('skip find phase for axis while axis %s is not referenced' % axis)",
"def enableaxes(self):\n debug('ControllerStartup.enableaxes()')\n if not self.pidevice.HasEAX() or self.prop['skipeax']:\n return\n for axis in self.pidevice.axes:\n try:\n self.pidevice.EAX(axis, True)\n except GCSError as exc:\n if exc != gcserror.E2_PI_CNTR_UNKNOWN_COMMAND:\n raise\n waitonready(self.pidevice, **self._kwargs)",
"def done(self):\n if not self._isSubplot:\n raise Exception(\"You are not in a subplotting context!\")\n self.__exit__()",
"def wait(self):\n self.event.wait()",
"def wait(self, cycles):\n\t\tpass",
"def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None",
"def a_subarray_in_the_idle_state():",
"def wait(self):\n self.mainloop().wait()",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def wait(self):\n pass",
"def wait(self):\n pass",
"def mpl_plot_was_rendered():\n figs_before = plt.gcf().number\n yield\n figs_after = plt.gcf().number\n assert figs_after > figs_before",
"def solvestep(self):\n if self.__n>=self.__nt:\n return True\n \n for self.__n in range (self.__n,self.__n+self.__output):\n self.__timestep()\n\n self.__plotcallback(self.__u[self.pml_length:-self.pml_length,\n self.pml_length:-self.pml_length],\n self.__n*self.__dt)\n time.sleep(self.__slowdown)\n \n return False",
"def wait(self):\n while not self.done:\n self.device._handle_events(1000)",
"def wait_empty(cls):\n for recorder in cls.recorders:\n recorder._wait_empty()",
"def tick(self):\n\n #tick each animation, and remember any animations which have finished\n #remove any finished ones from the dictionary\n finished = []\n for key, anim in self.animations.items():\n anim.tick()\n if not anim.active:\n finished.append(key) \n for key in finished:\n self.animations.pop(key)",
"def after_all_sweeps(self):\r\n _debug('GUISignalGenerator: after_all_sweeps()')\r\n self.window.sleep(0.05)",
"def run(self):\n\t\twhile 1:\n\t\t\tif self._driveSystem.port_open == True and self._parent.aborted==False:\n\n\t\t\t\tself._driveSystem.check_encoder_pos()\n\t\t\t\tpos=self._driveSystem.positions\n\t\t\t\tif self._parent.printRequest==True: #Print positions when print Button was pressed\n\t\t\t\t\toutput=\"Axis 1: \"+str(pos[0])+\"\\nAxis 2: \"+str(pos[1])+\"\\nAxis 3: \"+str(pos[2])+\"\\nAxis 4: \"+str(pos[3])\n\t\t\t\t\tprint(output)\n\t\t\t\t\tself._parent.printRequest=False\n\t\t\t\tevent = PosUpdateEvent(myEVT_POSUPDATE, -1, pos)\n\t\t\t\twx.PostEvent(self._parent.matplotpanel, event)\n\n\t\t\t\tt=0\n\t\t\t\twhile t<UPDATE_TIME:\n\t\t\t\t\tself.checkQ()\n\t\t\t\t\ttime.sleep(REAC_TIME)\n\t\t\t\t\tt=t+REAC_TIME\n\t\t\telse:\n\t\t\t\ttime.sleep(1)",
"def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)",
"def before_sweep(self):\r\n _debug('GUISignalGenerator: before_sweep()')\r\n self.window.sleep(0.05)",
"def _go_to_axes(self, session, el=None, az=None, third=None):\n move_defs = []\n for axis_name, short_name, target in [\n ('Azimuth', 'az', az),\n ('Elevation', 'el', el),\n ('Boresight', 'third', third),\n ]:\n if target is not None:\n move_defs.append(\n (short_name, self._go_to_axis(session, axis_name, target)))\n if len(move_defs) is None:\n return True, 'No motion requested.'\n\n moves = yield DeferredList([d for n, d in move_defs])\n all_ok, msgs = True, []\n for _ok, result in moves:\n if _ok:\n all_ok = all_ok and result[0]\n msgs.append(result[1])\n else:\n all_ok = False\n msgs.append(f'Crash! {result}')\n\n if all_ok:\n msg = msgs[0]\n else:\n msg = ' '.join([f'{n}: {msg}' for (n, d), msg in zip(move_defs, msgs)])\n return all_ok, msg",
"def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )",
"def end_phase():\n pass",
"def wait_for_everyone():\n PartialState().wait_for_everyone()",
"def call_back(self):\n\n # Poll the pipe\n while self.pipe.poll():\n # Look inside of the pipe and take the_box\n the_box = self.pipe.recv()\n\n # If the_box is empty, it's game over\n if the_box is None:\n self.terminate()\n return False\n\n # Otherwise, update the plot with the tools in the_box\n else:\n # Get our elapsed time\n elapsed_time = time.time() - the_box[0]\n\n # Add the elements to the plot\n self.ax1.plot(elapsed_time, the_box[1], c='tab:orange',\n marker=r'$\\clubsuit$', alpha=0.5,\n markersize=10)\n self.ax1.plot(elapsed_time, the_box[2], c='tab:blue',\n marker=r'$\\clubsuit$', alpha=0.5,\n markersize=10)\n self.ax2.plot(elapsed_time, the_box[3], c='tab:pink',\n marker=r'$\\clubsuit$', alpha=0.5,\n markersize=10)\n\n current_gen = the_box[-2]\n generations = the_box[-1]\n if current_gen == generations - 1:\n x, y = self.get_path_coordinates(the_box[4], the_box[5], the_box[6])\n self.ax3.plot(y, x, c='tab:olive', marker=r'$\\clubsuit$',\n alpha=0.5, markersize=10)\n\n\n # Redraw the canvas\n self.fig.canvas.draw()\n return True",
"def verify_queue_empty(self):\n self.assert_sample_queue_size(DataParticleType.VELOCITY_PARTICLE, 0)\n self.assert_sample_queue_size(DataParticleType.TIME_PARTICLE, 0)",
"def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")"
]
| [
"0.5853217",
"0.56518406",
"0.5579938",
"0.5540883",
"0.55369157",
"0.5502633",
"0.54975456",
"0.54909974",
"0.5422398",
"0.5422398",
"0.5422398",
"0.5422398",
"0.5417364",
"0.5417364",
"0.54161793",
"0.54110414",
"0.53737473",
"0.53620887",
"0.534525",
"0.53452307",
"0.53350705",
"0.526706",
"0.5236677",
"0.52294815",
"0.52206296",
"0.51947755",
"0.51772344",
"0.51681787",
"0.5165114",
"0.5161171"
]
| 0.59770775 | 0 |
Wait until all macros are finished, then query and raise macro error. | def waitonmacro(pidevice, timeout=300, predelay=0, polldelay=0.1):
if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):
raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)
maxtime = time() + timeout
waitonready(pidevice, timeout=timeout, predelay=predelay, polldelay=polldelay)
assert pidevice.HasqRMC() or pidevice.HasIsRunningMacro(), 'device does not support wait on macro'
while True:
if pidevice.HasqRMC() and not pidevice.qRMC().strip():
break
if pidevice.HasIsRunningMacro() and not pidevice.IsRunningMacro():
break
if time() > maxtime:
stopall(pidevice)
raise SystemError('waitonmacro() timed out after %.1f seconds' % timeout)
sleep(polldelay)
if pidevice.HasMAC_qERR():
errmsg = pidevice.MAC_qERR().strip()
if errmsg and int(errmsg.split('=')[1].split()[0]) != 0:
raise GCSError(gcserror.E1012_PI_CNTR_ERROR_IN_MACRO, message=errmsg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interrupted(self):\n print(\"Macro interrupted!\")",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def fatal_error_processor(self):\n while True:\n _ = (yield)\n self.failed = True\n self.converged = False\n self.solve_completed = False",
"def inject_failure(self):\n while len(self.circQ):\n yield self.env.timeout(time_to_failure())\n if len(self.circQ) > 0 and \\\n not self.currentProc.broken and \\\n self.currentProc.workLeft > 0:\n # Only break the machine if it is currently computing,\n # and if current proc is not restarting\n # TODO: Allow errors to be thrown while restarting\n self.BqLog(\"Injecting a failure in %s\" % (self.currentProc.name))\n self.numFailures += 1\n self.process.interrupt(cause=\"failure\")",
"def __macroRun(self):\n self.activeWindow().macroRun()",
"def check_deferred_responses(self):\n for future in self.futures:\n results = future.result()\n \n self.futures = []",
"def _expand_macros(self, macro_ids) -> None:\n if macro_ids is None:\n return\n\n for macro_id in macro_ids:\n macro_cfg = self._app_cfg['macros'][macro_id]\n\n self._sweep_stages.extend(macro_cfg.get('sweeps', []))\n self._reset_stages.extend(macro_cfg.get('resets', []))\n self._process_stages.extend(macro_cfg.get('processes', []))",
"def wait_for_interrupts(self, wait_time = 1):\n raise AssertionError(\"wait_for_interrupts function i not implemented\")",
"def run_error_messages(self):\r\n self.error = \"\"\r\n #while self.error_queue:\r\n #self.error += (self.error_messages.get(\r\n # self.error_queue.popleft, None\r\n # ) + \" \")\r\n #self.error += self.I_source.query(\"STAT:QUE?\")\r\n #print(self.error)\r\n #self.I_source.write(\"STAT:QUE:CLE\")\r\n #self.message_box.setText(self.error)\r\n #self.message_box.exec_()\r",
"def callback_run_mysql_select(queries, time=3):\n\tfor i in range(0, time):\n\t\tpass",
"def test_run_query_submit_exceptions(cbcsdk_mock):\n api = cbcsdk_mock.api\n cbcsdk_mock.mock_request(\"GET\", \"/livequery/v1/orgs/test/runs/run_id\", GET_RUN_RESP)\n run_query = api.select(Run).where(\n \"SELECT path, DATETIME(atime,\\\"unixepoch\\\",\\\"localtime\\\") AS \\\"Last Accessed\\\", \"\n \"DATETIME(mtime,\\\"unixepoch\\\",\\\"localtime\\\") AS \\\"Last Modified\\\", \"\n \"DATETIME(ctime,\\\"unixepoch\\\",\\\"localtime\\\") AS \\\"Created\\\" FROM file \"\n \"WHERE path LIKE \\\"\\\\users\\\\%\\\\AppData\\\\%.exe\\\";\")\n cbcsdk_mock.mock_request(\"POST\", \"/livequery/v1/orgs/test/runs\", GET_RUN_RESP)\n result = run_query.submit()\n assert run_query._query_token is not None\n # raise ApiError when the query has already been submitted\n with pytest.raises(ApiError):\n run_query.submit()\n assert result.status == \"COMPLETE\"\n # raise ApiError when the query is missing SQL to run\n new_query = api.select(Run).name(\"myRunName\")\n with pytest.raises(ApiError):\n new_query.submit()",
"def sequence_macros(macros):\n\n def foo():\n for m in macros:\n m()\n\n return foo",
"def test_later_failure_result(self):\n d = Deferred()\n dr = EventualResult(d, None)\n result_list = []\n done = append_in_thread(result_list, dr.wait, 100)\n time.sleep(0.1)\n d.errback(RuntimeError())\n done.wait(100)\n self.assertEqual(\n (result_list[0], result_list[1].__class__), (False, RuntimeError))",
"def _wait_for_all_operations_done(self):\n while self._test_names_to_processes:\n time.sleep(10)\n running_test_names = list(self._test_names_to_processes.keys())\n for test_name in running_test_names:\n running_proc = self._test_names_to_processes.get(test_name)\n return_code = running_proc.poll()\n if return_code is not None:\n test_case_state = self._test_names_to_test_states.get(test_name)\n self._handle_failure(running_proc, test_case_state.running_test)\n del self._test_names_to_processes[test_name]\n print('Started validating: {}'.format(test_name))\n test_case_state.running_test.validate_result()\n self._run_test(test_case_state.remaining_tests)",
"def _raise_error_fast(self):\n\n # Find the first job whose status is TASK_ERROR if it exists.\n with self._lock:\n error_job = next((job for job in self._jobs\n if job.status == TASK_ERROR), None)\n\n # If this error job exists, immediatly raise the error by\n # calling get_result. This job might not exists if abort has been\n # called directly or if the generator is gc'ed.\n if error_job is not None:\n error_job.get_result(self.timeout)",
"def _execute_anybodycon(macro,\n logfile,\n anybodycon_path=get_anybodycon_path(),\n timeout=3600,\n keep_macrofile=False,\n env=None):\n if not os.path.isfile(anybodycon_path):\n raise IOError(\"Can not find anybodycon.exe: \" + anybodycon_path)\n macro_filename = os.path.splitext(logfile.name)[0] + '.anymcr'\n with open(macro_filename, 'w+b') as macro_file:\n macro_file.write('\\n'.join(macro).encode('UTF-8'))\n macro_file.flush()\n anybodycmd = [os.path.realpath(anybodycon_path),\n '--macro=', macro_file.name, '/ni']\n if sys.platform.startswith(\"win\"):\n # Don't display the Windows GPF dialog if the invoked program dies.\n # See comp.os.ms-windows.programmer.win32\n # How to suppress crash notification dialog?, Jan 14,2004 -\n # Raymond Chen's response [1]\n SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN\n ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX)\n subprocess_flags = 0x8000000 # win32con.CREATE_NO_WINDOW?\n else:\n subprocess_flags = 0\n try:\n # Check global module flag to avoid starting processes after\n # the user cancelled the processes\n proc = Popen(anybodycmd,\n stdout=logfile,\n stderr=logfile,\n creationflags=subprocess_flags,\n env=env)\n _subprocess_container.add(proc.pid)\n timeout_time = time.clock() + timeout\n while proc.poll() is None:\n if time.clock() > timeout_time:\n proc.terminate()\n proc.communicate()\n logfile.seek(0, 2)\n logfile.write('ERROR: Timeout after {:d} sec.'.format(timeout))\n break\n time.sleep(0.05)\n _subprocess_container.remove(proc.pid)\n if proc.returncode == _KILLED_BY_ANYPYTOOLS:\n logfile.write('Anybodycon.exe was interrupted by AnyPyTools')\n elif proc.returncode:\n logfile.write('ERROR: anybodycon.exe exited unexpectedly.'\n ' Return code: ' + str(proc.returncode))\n if not keep_macrofile:\n silentremove(macro_file.name)\n finally:\n logfile.seek(0)\n return proc.returncode",
"def _check_job_completeness(self, jobs):\n for job in concurrent.futures.as_completed(jobs):\n if job.exception():\n raise (job.exception())",
"def waitUntilSuccess():",
"def get_errors(cursor):\n while True:\n message = cursor.lpop(\"errors\")\n if message is None:\n print(\"There are no errors more\")\n return None\n print(message)",
"def test_multipleConcurrentFailure(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n resolver.protocol = StubDNSDatagramProtocol()\n queries = resolver.protocol.queries\n\n query = dns.Query('foo.example.com', dns.A)\n firstResult = resolver.query(query)\n secondResult = resolver.query(query)\n\n class ExpectedException(Exception):\n pass\n\n queries.pop()[-1].errback(failure.Failure(ExpectedException()))\n\n return defer.gatherResults([\n self.assertFailure(firstResult, ExpectedException),\n self.assertFailure(secondResult, ExpectedException)])",
"def wait(self):\n log.debug('Waiting for search to finish')\n self._searchthread.join()\n if self._exception and self._raise_errors:\n raise self._exception",
"def wait_until_not_busy(debugger, t=100):\n\n while debugger.is_busy():\n yield timeout(t)",
"def wait_for_completed(self, timeout=8):\n if not self._robots_action_dict:\n logger.error(\"MultiAction: no action is waiting\")\n return False\n robot_id_executing = [] # dictionary can`t change size during iteration, so use list\n start_time = time.time()\n spent_time = 0\n final_result = False\n for robot_id in self._robots_action_dict.keys():\n robot_id_executing.append(robot_id)\n logger.info(\n \"MultiAction: Group action start waiting for completed, {0}\".format(\n self._robots_action_dict[robot_id_executing[0]]))\n while spent_time < timeout:\n cur_time = time.time()\n spent_time = cur_time - start_time\n if not robot_id_executing:\n logger.info(\"MultiAction: wait_for_all_completed. All of robots are completed\")\n final_result = True\n break\n for robot_id, robot_action in self._robots_action_dict.items():\n if self._robots_action_dict[robot_id].is_completed and (robot_id in robot_id_executing):\n action_key = self._robots_action_dict[robot_id].make_action_key()\n in_progress_list = self._robots_action_dict[robot_id]._obj._in_progress\n # make sure the action has been removed form _in_progress\n if action_key not in in_progress_list:\n robot_id_executing.remove(robot_id)\n logger.info(\n \"MultiAction: wait_for_all_completed. Robot id ({0}) action is completed, \"\n \"action: {1}\".format(robot_id, self._robots_action_dict[robot_id]))\n time.sleep(0.05)\n # timeout\n if not final_result:\n for robot_id, robot_action in self._robots_action_dict.items():\n if not robot_action.is_completed:\n robot_action._changeto_state(action.ACTION_EXCEPTION)\n logger.warning(\n \"MultiAction: wait_for_all_completed, timeout! Robot id {}, action {}\".format(\n robot_id, self._robots_action_dict[robot_id]))\n else:\n # each robot has completed its action\n logger.info(\"MultiAction: wait for all completed successfully, action {0}\".format(self._robots_action_dict))\n return final_result",
"def need_completion_reset(queries):\n for query in sqlparse.split(queries):\n try:\n first_token = query.split()[0]\n if first_token.lower() in ('use', '\\\\u'):\n return True\n except Exception:\n return False",
"def get_macro(name, macros):\n for themacro in (macro for macro in macros if macro.get('name') == name):\n if themacro:\n return(themacro)\n else:\n pass",
"def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)",
"def need_completion_refresh(queries):\n for query in sqlparse.split(queries):\n try:\n first_token = query.split()[0]\n if first_token.lower() in ('alter', 'create', 'use', '\\\\r',\n '\\\\u', 'connect', 'drop'):\n return True\n except Exception:\n return False"
]
| [
"0.53399175",
"0.5052747",
"0.5052747",
"0.5052747",
"0.5052747",
"0.50137264",
"0.501022",
"0.496639",
"0.4937988",
"0.4925063",
"0.49245542",
"0.48591673",
"0.48575124",
"0.48339245",
"0.48310927",
"0.48293483",
"0.48289663",
"0.47969922",
"0.4782267",
"0.47466642",
"0.47407964",
"0.47390264",
"0.47390008",
"0.47333303",
"0.47262698",
"0.4720855",
"0.47120827",
"0.47016302",
"0.4695905",
"0.46907955"
]
| 0.51521873 | 1 |
Call MOV with 'axes' and 'values' and wait for motion to finish. | def moveandwait(pidevice, axes, values=None, timeout=300):
if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):
raise TypeError('Type %s is not supported!' % type(pidevice).__name__)
if not axes:
return
pidevice.MOV(axes, values)
if isinstance(axes, dict):
axes = list(axes.keys())
waitontarget(pidevice, axes=axes, timeout=timeout) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def moveto(self, x, y, z):\n t = self.moveTime * 3\n N = self.moveSamples * 3\n # read initial position for all channels\n texts = [getattr(self, ax + \"Label\").text()\n for ax in self.activeChannels]\n initPos = [re.findall(r\"[-+]?\\d*\\.\\d+|[-+]?\\d+\", t)[0] for t in texts]\n\n rampx = makeRamp(float(initPos[0]), x, self.nSamples)# / convFactors['x']\n rampy = makeRamp(float(initPos[1]), y, self.nSamples)# / convFactors['y']\n rampz = makeRamp(float(initPos[2]), z, self.nSamples)# / convFactors['z']\n# ramp = np.array((rampx, rampy, rampz))\n\n tuc = ptime.time()\n for i in range(self.nSamples):\n borrar = rampx[i] + rampy[i] + rampz[i]\n# self.aotask.write([rampx[i] / convFactors['x'],\n# rampy[i] / convFactors['y'],\n# rampz[i] / convFactors['z']], auto_start=True)\n# time.sleep(t / N)\n\n print(\"se mueve todo en\", np.round(ptime.time()-tuc, 3),\"segs\")\n\n self.xLabel.setText(\"{}\".format(np.around(float(rampx[-1]), 2)))\n self.yLabel.setText(\"{}\".format(np.around(float(rampy[-1]), 2)))\n self.zLabel.setText(\"{}\".format(np.around(float(rampz[-1]), 2)))",
"def move(self, axis, dist):\n t = self.moveTime\n N = self.moveSamples\n # read initial position for all channels\n texts = [getattr(self, ax + \"Label\").text()\n for ax in self.activeChannels]\n initPos = [re.findall(r\"[-+]?\\d*\\.\\d+|[-+]?\\d+\", t)[0] for t in texts]\n initPos = np.array(initPos, dtype=float)[:, np.newaxis]\n fullPos = np.repeat(initPos, self.nSamples, axis=1)\n\n # make position ramp for moving axis\n ramp = makeRamp(0, dist, self.nSamples)\n fullPos[self.activeChannels.index(axis)] += ramp\n\n# factors = np.array([convFactors['x'], convFactors['y'],\n# convFactors['z']])[:, np.newaxis]\n# fullSignal = fullPos/factors\n toc = ptime.time()\n for i in range(self.nSamples):\n# self.aotask.write(fullSignal, auto_start=True)\n# time.sleep(t / N)\n borrar = 1+1\n\n print(\"se mueve en\", np.round(ptime.time() - toc, 3), \"segs\")\n\n # update position text\n newPos = fullPos[self.activeChannels.index(axis)][-1]\n# newText = \"<strong>\" + axis + \" = {0:.2f} µm</strong>\".format(newPos)\n newText = \"{}\".format(newPos)\n getattr(self, axis + \"Label\").setText(newText)\n self.paramChanged()",
"def xyzArmCallback(msg):\n global robot\n # extract message components and normalize - joystick provides [-100,100] and \n # we will scale to [-0.1,0.1]\n arm_x = msg.data[0]/ARM_DATA_SCALING\n arm_y = msg.data[1]/ARM_DATA_SCALING\n arm_z = msg.data[2]\n\n # conditionally scale Z axis movements\n if(arm_z > 0):\n arm_z = msg.data[2] / ARM_DATA_SCALING_UP_Z\n elif(arm_z < 0):\n arm_z = msg.data[2] / ARM_DATA_SCALING_DOWN_Z\n\n if (arm_x == 0 and arm_y == 0 and arm_z == 0):\n #rospy.loginfo(\"no arm movement requested\")\n i=0 #placeholder\n\n else:\n # displacement = np.array([arm_x, arm_y, arm_z])\n # success = robot.arm.move_ee_xyz(displacement, plan=False)\n # rospy.loginfo(\"tried to move arm\")\n displacement = np.array([arm_x, arm_y, arm_z])\n t,r,q = robot.arm.pose_ee\n if (t[2] < min_z):\n rospy.loginfo(\"arm too low, safety protocol activated with z=%s\",str(t[2]))\n elif(t[2] > max_z):\n rospy.loginfo(\"arm too high, safety protocol activated with z=%s\",str(t[2]))\n else:\n translation = np.add(np.asarray(t).flatten(), displacement)\n orientation = np.asarray(r)\n ident = np.eye(3)\n orientation[:,2] = ident[:,2]\n orientation[2,:] = ident[2,:]\n robot.arm.set_ee_pose(translation, orientation, plan=False)\n rospy.loginfo(\"translation was %s\", str(translation))\n rospy.loginfo(\"orientation was %s\", str(orientation))",
"def movetomiddle(pidevice, axes=None):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n axes = getaxeslist(pidevice, axes)\n if not axes:\n return\n rangemin = pidevice.qTMN(axes)\n rangemax = pidevice.qTMX(axes)\n targets = {}\n for axis in axes:\n targets[axis] = rangemin[axis] + (rangemax[axis] - rangemin[axis]) / 2.0\n pidevice.MOV(targets)",
"def do_steps(self, motornum, val):\n #print \"Moving in steps...\"\n steps = abs(val)\n if val < 0:\n direction = 1\n else:\n direction = 2\n mag = steps\n\n self.takesteps(mag=mag, direction=direction, motornum=motornum)\n self.do_azangle()\n self.do_altangle()",
"def GUI_move(self, axis):\n self.set_pos = [0,0,0]\n entry = self.coo_ent[axis].get()\n if entry == \"\":\n entry = \"0\"\n try:\n entry = int(entry)\n except Exception as e:\n print \"Invalid entry. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n return\n if entry > 50000 or entry < -50000:\n print \"Stage movement too large\"\n return\n self.set_pos[axis] = entry\n print \"move %s = %d\"%(self.POS_NAME[axis], self.set_pos[axis])\n self.Move_Stage()\n self.Read_Encoders()\n self.GUI_Write_Encoder_Values()\n return",
"def servo_dispenser_val_callback(self, msg):\n\n # callback\n self.servo_dispenser_val = msg.data\n\n # don't move if multiple buttons pressed, or none are pressed\n if (self.move_horizontal == 0 and self.move_vertical == 0):\n print('reset -- return to 0 degrees')\n if self.servo_dispenser_val != 0:\n self.servo_dispenser_speed = -SERVO_DISPENSER_SPEED\n self.pub_servo_dispenser_speed.publish(self.servo_dispenser_speed)\n\n # up (+)\n elif (self.move_vertical > 0):\n print('turn -- go to {} degrees'.format(SERVO_MAX))\n if self.servo_dispenser_val < SERVO_MAX:\n self.servo_dispenser_speed = SERVO_DISPENSER_SPEED\n self.pub_servo_dispenser_speed.publish(self.servo_dispenser_speed)",
"def _go_to_axes(self, session, el=None, az=None, third=None):\n move_defs = []\n for axis_name, short_name, target in [\n ('Azimuth', 'az', az),\n ('Elevation', 'el', el),\n ('Boresight', 'third', third),\n ]:\n if target is not None:\n move_defs.append(\n (short_name, self._go_to_axis(session, axis_name, target)))\n if len(move_defs) is None:\n return True, 'No motion requested.'\n\n moves = yield DeferredList([d for n, d in move_defs])\n all_ok, msgs = True, []\n for _ok, result in moves:\n if _ok:\n all_ok = all_ok and result[0]\n msgs.append(result[1])\n else:\n all_ok = False\n msgs.append(f'Crash! {result}')\n\n if all_ok:\n msg = msgs[0]\n else:\n msg = ' '.join([f'{n}: {msg}' for (n, d), msg in zip(move_defs, msgs)])\n return all_ok, msg",
"def camera_exec():\n pygame.init()\n locals()\n\n plot_num = 0\n running, Clock, font = camera_connect()\n while running:\n Clock.tick(100)\n\n # read framebuffer\n fb = None\n while (True) :\n try:\n fb = pyopenmv.fb_dump()\n break\n except Exception as e:\n # try and reconnect on failure\n camera_connect()\n\n # signal to UArm that camera has connected\n camera_started.set()\n if fb is not None:\n # create image from RGB888\n image = pygame.image.frombuffer(fb[2].flat[0:], (fb[0], fb[1]), 'RGB')\n screen = pygame.display.set_mode((fb[0], fb[1]), pygame.DOUBLEBUF, 32)\n\n fps = Clock.get_fps()\n # blit stuff\n screen.blit(image, (0, 0))\n screen.blit(font.render(\"FPS %.2f\"%(fps), 1, (255, 0, 0)), (0, 0))\n\n # update display\n pygame.display.flip()\n\n # get output from text buffer\n tx_len = pyopenmv.tx_buf_len()\n\n # object was found by camera if there is outputted text\n if tx_len:\n\n '''\n if UArm has signaled to the camera to identify the object and the camera has not already\n assigned values to the global variables associated with the object's location\n '''\n if camera_event.is_set() and (data_ready.is_set() is False):\n\n # read the most recent data at index 0 from the text buffer\n buff = pyopenmv.tx_buf(tx_len).decode()\n split_buff = str(buff).splitlines()\n if h_angle_key in split_buff[0]:\n\n # Most recent line in buff contains needed information\n global h_angle, v_angle, is_centered\n tok = split_buff[0].split()\n\n # set angles to corresponding values determined by camera\n h_angle, v_angle = float(tok[1]), float(tok[3])\n if tok[5] == \"True\":\n is_centered = True\n else:\n is_centered = False\n # signal that global variables have been set\n data_ready.set()\n\n if plot_ready.is_set():\n print(\"success_rate: \", success_history)\n plot_distance(distance_history, plot_num)\n plot_success(success_history, plot_num)\n plot_num += 1\n plot_ready.clear()\n print(\"success rate for \", len(success_history), \" tests: \",\n success_history.count(True) / len(success_history))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n if event.key == pygame.K_c:\n pygame.image.save(image, \"capture.png\")\n\n pygame.quit()\n pyopenmv.stop_script()",
"def currentstate_callback(self, odom):\n self.CurrentPosition = np.array([odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z])\n self.CurrentVelocity = np.array([odom.twist.twist.linear.x, odom.twist.twist.linear.y, odom.twist.twist.linear.z])",
"def executeTrajectory():\n driveStraight(1, 0.6)\n rotate(0.25)\n driveStraight(1, .45)\n rotate(-0.25)",
"def move(self, motion, **kwargs):\n motion_err = kwargs.pop('err', self.motion_err)\n sigma = np.abs(motion) * motion_err\n e = np.expand_dims(np.random.randn(2), 1)\n e *= sigma\n\n self.pose[0] += motion[0] + e[0]\n self.pose[1] += motion[1] + e[1]",
"def _go_to_axis(self, session, axis, target):\n # Step time in event loop.\n TICK_TIME = 0.1\n\n # Time for which to sample distance for \"still\" and \"moving\"\n # conditions.\n PROFILE_TIME = 1.\n\n # When aborting, how many seconds to use to project a good\n # stopping position (d = v*t)\n ABORT_TIME = 2.\n\n # Threshold (deg) for declaring that we've reached\n # destination.\n THERE_YET = 0.01\n\n # How long to wait after initiation for signs of motion,\n # before giving up. This is normally within 2 or 3 seconds\n # (SATP), but in \"cold\" cases where siren needs to sound, this\n # can be as long as 12 seconds.\n MAX_STARTUP_TIME = 13.\n\n # Velocity to assume when computing maximum time a move should take (to bail\n # out in unforeseen circumstances).\n UNREASONABLE_VEL = 0.5\n\n # Positive acknowledgment of AcuControl.go_to\n OK_RESPONSE = b'OK, Command executed.'\n\n # Enum for the motion states\n State = Enum(f'{axis}State',\n ['INIT', 'WAIT_MOVING', 'WAIT_STILL', 'FAIL', 'DONE'])\n\n # Specialization for different axis types.\n # pos/mode are common:\n def get_pos():\n return self.data['status']['summary'][f'{axis}_current_position']\n\n def get_mode():\n return self.data['status']['summary'][f'{axis}_mode']\n\n # vel/goto are different:\n if axis in ['Azimuth', 'Elevation']:\n def get_vel():\n return self.data['status']['summary'][f'{axis}_current_velocity']\n\n if axis == 'Azimuth':\n @inlineCallbacks\n def goto(target):\n result = yield self.acu_control.go_to(az=target)\n return result\n else:\n @inlineCallbacks\n def goto(target):\n result = yield self.acu_control.go_to(el=target)\n return result\n\n elif axis in ['Boresight']:\n def get_vel():\n return 0.\n\n @inlineCallbacks\n def goto(target):\n result = yield self.acu_control.go_3rd_axis(target)\n return result\n\n else:\n return False, f\"No configuration for axis={axis}\"\n\n limit_func, _ = self._get_limit_func(axis)\n\n # History of recent distances from target.\n history = []\n\n def get_history(t):\n # Returns (ok, hist) where hist is roughly the past t\n # seconds of position data and ok is whether or not\n # that much history was actually available.\n n = int(t // TICK_TIME) + 1\n return (n <= len(history)), history[-n:]\n\n last_state = None\n state = State.INIT\n start_time = None\n motion_aborted = False\n assumption_fail = False\n motion_completed = False\n give_up_time = None\n has_never_moved = True\n\n while session.status in ['starting', 'running', 'stopping']:\n # Time ...\n now = time.time()\n if start_time is None:\n start_time = now\n time_since_start = now - start_time\n motion_expected = time_since_start > MAX_STARTUP_TIME\n\n # Space ...\n current_pos, current_vel = get_pos(), get_vel()\n distance = abs(target - current_pos)\n history.append(distance)\n if give_up_time is None:\n give_up_time = now + distance / UNREASONABLE_VEL \\\n + MAX_STARTUP_TIME + 2 * PROFILE_TIME\n\n # Do we seem to be moving / not moving?\n ok, _d = get_history(PROFILE_TIME)\n still = ok and (np.std(_d) < 0.01)\n moving = ok and (np.std(_d) >= 0.01)\n has_never_moved = (has_never_moved and not moving)\n\n near_destination = distance < THERE_YET\n mode_ok = (get_mode() == 'Preset')\n\n # Log only on state changes\n if state != last_state:\n _state = f'{axis}.state={state.name}'\n self.log.info(\n f'{_state:<30} dt={now-start_time:7.3f} dist={distance:8.3f}')\n last_state = state\n\n # Handle task abort\n if session.status == 'stopping' and not motion_aborted:\n target = limit_func(current_pos + current_vel * ABORT_TIME)\n 
state = State.INIT\n motion_aborted = True\n\n # Turn \"too long\" into an immediate exit.\n if now > give_up_time:\n self.log.error('Motion did not complete in a timely fashion; exiting.')\n assumption_fail = True\n break\n\n # Main state machine\n if state == State.INIT:\n # Set target position and change mode to Preset.\n result = yield goto(target)\n if result == OK_RESPONSE:\n state = State.WAIT_MOVING\n else:\n self.log.error(f'ACU rejected go_to with message: {result}')\n state = State.FAIL\n # Reset the clock for tracking \"still\" / \"moving\".\n history = []\n start_time = time.time()\n\n elif state == State.WAIT_MOVING:\n # Position and mode change requested, now wait for\n # either mode change or clear failure of motion.\n if mode_ok:\n state = state.WAIT_STILL\n elif still and motion_expected:\n self.log.error(f'Motion did not start within {MAX_STARTUP_TIME:.1f} s.')\n state = state.FAIL\n\n elif state == State.WAIT_STILL:\n # Once moving, watch for end of motion.\n if not mode_ok:\n self.log.error('Unexpected axis mode transition; exiting.')\n state = State.FAIL\n elif still:\n if near_destination:\n state = State.DONE\n elif has_never_moved and motion_expected:\n # The settling time, near a soft limit, can be\n # a bit long ... so only timeout on\n # motion_expected if we've never moved at all.\n self.log.error(f'Motion did not start within {MAX_STARTUP_TIME:.1f} s.')\n state = State.FAIL\n\n elif state == State.FAIL:\n # Move did not complete as planned.\n assumption_fail = True\n break\n\n elif state == State.DONE:\n # We seem to have arrived at destination.\n motion_completed = True\n break\n\n # Keep only ~20 seconds of history ...\n _, history = get_history(20.)\n\n yield dsleep(TICK_TIME)\n\n success = motion_completed and not (motion_aborted or assumption_fail)\n\n if success:\n msg = 'Move complete.'\n elif motion_aborted:\n msg = 'Move aborted!'\n else:\n msg = 'Irregularity during motion!'\n return success, msg",
"def move(axis, value):\n #print(\"moving\", axis, value)\n if axis in sample:\n sample[axis] = value\n elif axis == \"detectorMaskMap\":\n # Detector mask is \"0\", \"2\", \"4\", \"6\", \"8\" or \"10\"\n value = str(int(value/2)*2)\n candor.move(**{axis: value})\n elif axis == \"Q_z\":\n # Update slits to maintain constant footprint whem moving Qz\n F = sample['sample_width']\n sample_angle = candor['sampleAngleMotor']\n candor.move(**{axis: value})\n L2S = abs(candor.PRE_SAMPLE_SLIT_Z)\n L12 = abs(candor.SOURCE_APERTURE_Z - candor.PRE_SAMPLE_SLIT_Z)\n S2 = F*np.sin(np.radians(sample_angle))/(1+(1+R12)*L2S/L12)\n S1 = S2 * R12\n candor.move(slitAperture1=S1, slitAperture2=S2)\n else:\n # TODO: check that qx is capturing diffuse beam\n candor.move(**{axis: value})",
"def animate_result(vehicles, setup):\n fig = plt.figure()\n lines = []\n t, delta_t, n_steps, t_max = get_time_values_from_setup(setup)\n ax = plt.axes(xlim=(-10, setup.get('lanes').get('lane_length') + 10),\n ylim=(-1, setup.get('lanes').get('n_lanes')))\n\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=15, metadata=dict(\n artist='Philipp Froehlich'), bitrate=1800)\n\n for i in range(len(vehicles)):\n lobj = ax.plot([], [], linewidth=4)[0]\n lines.append(lobj)\n\n def init():\n for line in lines:\n line.set_data([], [])\n return lines\n\n def animate(frame_number):\n for lnum, line in enumerate(lines):\n act = np.zeros([2, 2])\n # position\n act[:, 0] = vehicles[lnum].position_archive[frame_number, 0]\n # lane\n act[:, 1] = vehicles[lnum].position_archive[frame_number, 1]\n # add saftey_distance\n act[1, 0] += vehicles[lnum].position_archive[frame_number, 2]\n # print(act)\n line.set_data(np.transpose(act))\n return tuple(lines)\n\n plt.xlabel('position [m]')\n\n plt.ylabel(' lane number')\n\n plt.title('animation of the safety bouding box of every car')\n\n anim = animation.FuncAnimation(\n fig, animate, init_func=init, frames=n_steps, interval=50, blit=True)\n\n plt.show()\n print('please wait for plot to save')\n anim.save('result/latest.mp4', writer=writer)",
"def animate_traj(traj, robot, pause=True, restore=True):\n if restore: _saver = openravepy.RobotStateSaver(robot)\n viewer = trajoptpy.GetViewer(robot.GetEnv())\n for (i,dofs) in enumerate(traj):\n print \"step %i/%i\"%(i+1,len(traj))\n robot.SetActiveDOFValues(dofs)\n if pause: viewer.Idle()\n else: viewer.Step()",
"def _go(self, cmd):\n cmdVar = self.actor.crudeCall(cmd, actor=self.enuName,\n cmdStr=f'slit linearVerticalMove expTime={self.exp.exptime} '\n f'pixelRange={self.pixelMin},{self.pixelMax}',\n timeLim=self.exp.exptime + SlitControl.timeMargin)\n\n if cmdVar.didFail:\n raise exception.SlitMoveFailed(self.enuName, cmdUtils.interpretFailure(cmdVar))\n\n return cmdVar",
"def async_move(position: tuple, time: float):\n pyautogui.moveTo(position[0], position[1], time)",
"async def _move_via_actuator_vals(self, act_val_pairs: ActuatorVals, velocity=60):\n coros = []\n\n head_args = {a.name: v for a, v in act_val_pairs.items() if a.is_head}\n if head_args:\n coros.append(self.move_head(**head_args, velocity=velocity))\n\n arm_args = {a.name.split('_')[0]: v for a, v in act_val_pairs.items() if not a.is_head}\n if arm_args:\n settings = (ArmSettings(side, val, velocity) for side, val in arm_args.items())\n coros.append(self._move_arms_via_settings(*settings))\n\n if coros:\n return await asyncio.gather(*coros)",
"def setservo(pidevice, axes, states=None, toignore=None, **kwargs):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n if not pidevice.HasSVO():\n return False\n if not axes:\n return True\n axes, states = getitemsvaluestuple(axes, states)\n if pidevice.HasRNP():\n axestorelax = [axis for axis, state in list(getservo(pidevice, axes).items()) if not state]\n if axestorelax:\n pidevice.RNP(axestorelax, [0.0] * len(axestorelax))\n waitonready(pidevice, **kwargs)\n eaxaxes = [axes[i] for i in range(len(axes)) if states[i]]\n enableaxes(pidevice, axes=eaxaxes, **kwargs)\n success = True\n toignore = [] if toignore is None else toignore\n toignore = [toignore] if not isinstance(toignore, list) else toignore\n toignore += [gcserror.E5_PI_CNTR_MOVE_WITHOUT_REF_OR_NO_SERVO, gcserror.E23_PI_CNTR_ILLEGAL_AXIS]\n for i, axis in enumerate(axes):\n try:\n pidevice.SVO(axis, states[i])\n except GCSError as exc: # no GCSRaise() because we want to log a warning\n if exc in toignore:\n debug('could not set servo for axis %r to %s: %s', axis, states[i], exc)\n success = False\n else:\n raise\n waitonready(pidevice, **kwargs)\n return success",
"def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left",
"def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left",
"def _setup_move(self, position):\n\n def done_moving(**kwargs):\n self.log.debug(\"%s async motion done\", self.name)\n self._done_moving(success=True)\n\n if self.done is None:\n # No done signal, so we rely on put completion\n moving_val = 1 - self.done_value\n self._move_changed(value=moving_val)\n\n self.log.debug(\"%s.setpoint = %s\", self.name, position)\n\n if self.actuate is not None:\n self.setpoint.put(position, wait=True)\n\n self.log.debug(\"%s.actuate = %s\", self.name, self.actuate_value)\n self.actuate.put(self.actuate_value, wait=False, callback=done_moving)\n else:\n self.setpoint.put(position, wait=False, callback=done_moving)",
"def timerCallback(self,evprent):\n self._odom_list.waitForTransform('map', 'base_footprint', rospy.Time(0), rospy.Duration(1.0))\n (position, orientation) = self._odom_list.lookupTransform('map','base_footprint', rospy.Time(0)) #finds the position and oriention of two objects relative to each other (hint: this returns arrays, while Pose uses lists)\n self._current.position.x = position[0]\n self._current.position.y = position[1]\n\n self._current.orientation.x = orientation[0]\n self._current.orientation.y = orientation[1]\n self._current.orientation.z = orientation[2]\n self._current.orientation.w = orientation[3]\n q = [self._current.orientation.x,\n self._current.orientation.y,\n self._current.orientation.z,\n self._current.orientation.w] # quaternion nonsense\n\n (roll, pitch, yaw) = euler_from_quaternion(q)",
"def __call__(self, *args, **kwargs):\n output, changes = self._call(self.vc.tensors(), kwargs, *args)\n self.vc.assign(changes)\n return output",
"def cozmo_go_to_pose(robot, x, y, angle_z):\n\trobot.go_to_pose(Pose(x, y, 0, angle_z=degrees(angle_z)), relative_to_robot=True).wait_for_completed()",
"def updateGraphics(board, step, caxes):\r\n boardArray= numpy.transpose(numpy.asarray(board))\r\n caxes.set_data(boardArray)\r\n plt.title('Step ' + str(step))\r\n plt.pause(constants.BLINK)\r\n plt.show()",
"def callback(self, data):\n X = data.linear.x\n Z = data.angular.z\n commands = self.controller.Compute(X, Z)\n self.last_time = time.clock()\n self.m0 = commands[1]\n self.m1 = commands[0]",
"def move():\n Robot.move()",
"def advance(distance, angle, da):\n cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=5)\n # How fast will we update the robot's movement?\n rate = 10\n # Set the equivalent ROS rate variable\n r = rospy.Rate(rate)\n # Set the forward linear speed to 0.2 meters per second\n if distance >= 0.0:\n linear_speed = 0.5\n else:\n linear_speed = -0.5\n # Set the travel distance in meters\n goal_distance = abs(distance)\n # Set the rotation speed in radians per second\n if angle < 0.0:\n angular_speed = -0.5\n else:\n angular_speed = 0.5\n # Set the angular tolerance in degrees converted to radians\n angular_tolerance = radians(0.5)\n # Set the rotation angle to angle in radians \n goal_angle = angle\n # Initialize the tf listener\n tf_listener = tf.TransformListener()\n # Give tf some time to fill its buffer\n rospy.sleep(2)\n # Set the map frame\n map_frame = '/map'\n # Set the odom frame\n odom_frame = '/odom'\n \"\"\" Find out if the robot uses /map->/odom transform \"\"\"\n try:\n tf_listener.waitForTransform(map_frame, odom_frame, rospy.Time(), rospy.Duration(1.0))\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"Cannot find transform between /map and /odom\")\n rospy.signal_shutdown(\"tf Exception\") \n # Find out if the robot uses /base_link or /base_footprint\n try:\n tf_listener.waitForTransform(odom_frame, '/base_footprint', rospy.Time(), rospy.Duration(1.0))\n base_frame = '/base_footprint'\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n try:\n tf_listener.waitForTransform(odom_frame, '/base_link', rospy.Time(), rospy.Duration(1.0))\n base_frame = '/base_link'\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"Cannot find transform between /odom and /base_link or /base_footprint\")\n rospy.signal_shutdown(\"tf Exception\") \n # Initialize the position variable as a Point type\n position = Point() \n # Initialize the movement command\n move_cmd = Twist()\n \n\n # Get the starting position values \n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n x_start = position.x\n y_start = position.y\n \n # Keep track of the distance traveled\n dist = 0.0\n #pdb.set_trace()\n if da:\n print bcolors.OKGREEN + \"da True\" + bcolors.ENDC\n print bcolors.OKGREEN + \"Empieza distancia\" + bcolors.ENDC\n # Set the movement command to forward motion\n move_cmd.linear.x = linear_speed\n bump_count = 0\n # Enter the loop to move along\n while dist < goal_distance and not rospy.is_shutdown():\n #pdb.set_trace()\n last_dist = dist\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current position\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the Euclidean distance from the start\n dist = sqrt(pow((position.x - x_start), 2) + pow((position.y - y_start), 2))\n \n if dist == last_dist and dist != 0.0:\n bump_count += 1\n print \"dist, goal_distance\", dist, goal_distance\n print \"BUMP\"+str(bump_count)\n if bump_count > 10:\n # Move forward for a time to go the desired distance\n linear_duration = 1.5/abs(linear_speed) \n ticks = int(linear_duration * rate)\n move_cmd.linear.x *= -1\n for t in range(ticks):\n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n continue\n # Stop the robot before the rotation\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n \n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n # Track the last angle 
measured\n last_angle = quat_to_angle(rotation)\n print bcolors.OKGREEN + \"Empieza angle\" + bcolors.ENDC\n # Track how far we have turned\n turn_angle = 0\n done = False\n while abs(turn_angle + angular_tolerance) < abs(goal_angle) and not rospy.is_shutdown():\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the amount of rotation since the last loop\n delta_angle = normalize_angle(quat_to_angle(rotation) - last_angle)\n # Add to the running total\n turn_angle += delta_angle\n last_angle = quat_to_angle(rotation)\n\n if (abs(turn_angle + angular_tolerance) > abs(goal_angle*4/5) or abs(goal_angle) < radians(2)) and not done:\n #pdb.set_trace()\n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n if angle < 0.0:\n angular_speed = -0.05\n else:\n angular_speed = 0.05\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n done = True\n \n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n else:\n print bcolors.OKGREEN + \"da False\" + bcolors.ENDC\n #pdb.set_trace()\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n # Track the last angle measured\n last_angle = quat_to_angle(rotation)\n print bcolors.OKGREEN + \"Empieza angle\" + bcolors.ENDC\n # Track how far we have turned\n turn_angle = 0\n done = False\n while abs(turn_angle + angular_tolerance) < abs(goal_angle) and not rospy.is_shutdown():\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the amount of rotation since the last loop\n delta_angle = normalize_angle(quat_to_angle(rotation) - last_angle)\n # Add to the running total\n turn_angle += delta_angle\n last_angle = quat_to_angle(rotation)\n# print \"x\", position.x\n# print \"y\", position.y\n# print \"la\", last_angle\n# print \"ta\", degrees(turn_angle)\n# print \"\\n\"\n #raw_input(\"Press ENTER to continue ...\")\n if (abs(turn_angle + angular_tolerance) > abs(goal_angle*4/5) or abs(goal_angle) < radians(2)) and not done:\n #pdb.set_trace()\n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n if angle < 0.0:\n angular_speed = -0.05\n else:\n angular_speed = 0.05\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n done = True\n \n # Stop the robot before the next movement\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n print bcolors.OKGREEN + \"Empieza distancia\" + bcolors.ENDC \n #pdb.set_trace()\n # Get the starting position values \n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n x_start = position.x\n y_start = position.y\n \n move_cmd.linear.x = linear_speed\n # Keep track of the distance traveled\n dist = 0.0\n bump_count = 0\n # Enter the loop to move along\n while dist < goal_distance and not rospy.is_shutdown():\n last_dist = dist\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current position\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the Euclidean distance from the start\n dist = sqrt(pow((position.x - x_start), 2) + pow((position.y - y_start), 2))\n \n if 
dist == last_dist and dist != 0.0:\n bump_count += 1\n print \"dist, goal_distance\", dist, goal_distance\n print \"BUMP\"+str(bump_count)\n if bump_count > 10:\n # Move forward for a time to go the desired distance\n linear_duration = 1.5/abs(linear_speed) \n ticks = int(linear_duration * rate)\n move_cmd.linear.x *= -1\n for t in range(ticks):\n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n continue\n # Stop the robot before the rotation\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n\n # Stop the robot for good\n cmd_vel_pub.publish(Twist())\n rospy.sleep(1)\n\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n return (position, rotation)"
]
| [
"0.55523056",
"0.55200475",
"0.53758305",
"0.5306232",
"0.5270305",
"0.5266047",
"0.51731163",
"0.51722825",
"0.51123303",
"0.5110023",
"0.509963",
"0.5097408",
"0.5087578",
"0.5083483",
"0.5082668",
"0.5075423",
"0.5070343",
"0.50366646",
"0.5011553",
"0.5005982",
"0.49991766",
"0.49991766",
"0.49812955",
"0.4970252",
"0.49412718",
"0.49306637",
"0.4929018",
"0.4911312",
"0.49093643",
"0.49033746"
]
| 0.7088165 | 0 |
Test ability to generate csv with simple input data. | def test_csv_simple_input(self):
# Mix of integer and string data. Ensure that commas and
# quotes are escaped properly.
data = [
{
'name': 'Normal string',
'item_num': 1,
},
{
'name': 'String, with, commas',
'item_num': 2,
},
{
'name': 'String with " quote',
'item_num': 3,
},
]
table = TableReportForTesting(data)
response = table.as_csv(HttpRequest())
self.assertEqual(response.status_code, 200)
# Expect cells containing commas to be escaped with quotes.
content = response.content
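        # On Python 3 the CSV body is bytes: decode it and strip stray null bytes before comparing.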
if PY3:
content = content.decode(settings.DEFAULT_CHARSET).replace('\x00', '')
self.assertEqual(
content,
'Name,Item Num\r\n'
'Normal string,1\r\n'
'"String, with, commas",2\r\n'
'"String with "" quote",3\r\n') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_csv_no_callback(self):\n csvfile = testdata.create_csv({\n \"foo\": testdata.get_int(),\n \"bar\": testdata.get_words(),\n })\n self.assertEqual(1, len(csvfile))",
"def test_to_csv(self):\n sale = SaleFactory(total_value=12347)\n self.assertIn('12347', sale.to_csv())",
"def test_csv_writes(self):\n counter = testdata.get_counter()\n csvfile = testdata.create_csv({\n \"foo\": counter,\n \"bar\": testdata.get_words,\n })\n\n for row in csvfile:\n for k in [\"foo\", \"bar\"]:\n self.assertTrue(k in row)\n self.assertTrue(row[k])",
"def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)",
"def testExampleCSVGeneration(ref):\n df = generate_dataframe()\n outpath = os.path.join(ref.tmp_dir, 'csv_result.csv')\n df.to_csv(outpath, index=False)\n columns = ref.all_fields_except(['random'])\n ref.assertCSVFileCorrect(outpath, 'dataframe_result.csv',\n check_data=columns, check_types=columns)",
"def test_csv_wrapper():\n # Test comma-delimited output.\n data = [['abc', '1'], ['d', '456']]\n headers = ['letters', 'number']\n output = delimited_output_adapter.adapter(iter(data), headers)\n assert \"\\n\".join(output) == dedent('''\\\n letters,number\\n\\\n abc,1\\n\\\n d,456''')\n\n # Test tab-delimited output.\n data = [['abc', '1'], ['d', '456']]\n headers = ['letters', 'number']\n output = delimited_output_adapter.adapter(\n iter(data), headers, table_format='tsv')\n assert \"\\n\".join(output) == dedent('''\\\n letters\\tnumber\\n\\\n abc\\t1\\n\\\n d\\t456''')\n\n with pytest.raises(ValueError):\n output = delimited_output_adapter.adapter(\n iter(data), headers, table_format='foobar')\n list(output)",
"def sample_csv_file(tmpdir):\n csv_file = tmpdir.mkdir('sub').join('sample_phone_masts.csv')\n # header and 3 rows of test data\n csv_file.write(\n 'Property Name,Property Address,Unit Name,Tenant Name,Lease Start Date,Lease End Date,Lease Years,'\n 'Current Rent\\n'\n 'Farmhouse 2,Field X,Unit 2,CellWorks Ltd,29 Apr 2008,28 Apr 2018,10,700\\n'\n 'Farmhouse 1,Field Y,Unit 1,CellWorks Ltd,29 Apr 2002,28 Apr 2020,15,500\\n'\n 'Farmhouse 3,Field Z,Unit 3,CellWorks Ltd,01 Dec 2019,01 Dec 2021,15,999.99\\n'\n )\n return str(csv_file)",
"def createFakeCsv():\n \n df = pd.DataFrame([\"a\",\"b\",\"c\"])\n \n df.to_csv(path+'/fakeCSV.csv')\n \n os.rename('fakeCSV.csv','data.csv')",
"def test_csvfile_single_row_of_data(fs: FakeFilesystem) -> None:\n contents = \"\"\"\"a\",\"b\"\n1,2\"\"\"\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n \"b\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }\n assert list(adapter.get_data({}, [])) == [{\"a\": 1.0, \"b\": 2.0, \"rowid\": 0}]",
"def test_csv_no_pagination(self):\n\n data = [\n {\n 'name': 'page 1',\n 'item_num': 1,\n },\n {\n 'name': 'page 2',\n 'item_num': 2,\n },\n ]\n\n table = TableReportForTesting(data)\n table.paginate(per_page=1)\n\n response = table.as_csv(HttpRequest())\n self.assertEqual(response.status_code, 200)\n # Ensure that even if table paginated, output is all row\n # data.\n content = response.content\n if PY3:\n content = content.decode(settings.DEFAULT_CHARSET).replace('\\x00', '')\n self.assertEqual(\n content,\n ('Name,Item Num\\r\\n'\n 'page 1,1\\r\\n'\n 'page 2,2\\r\\n')\n )",
"def test_purchased_csv(self):\r\n report = initialize_report(\"itemized_purchase_report\", self.now - self.FIVE_MINS, self.now + self.FIVE_MINS)\r\n csv_file = StringIO.StringIO()\r\n report.write_csv(csv_file)\r\n csv = csv_file.getvalue()\r\n csv_file.close()\r\n # Using excel mode csv, which automatically ends lines with \\r\\n, so need to convert to \\n\r\n self.assertEqual(csv.replace('\\r\\n', '\\n').strip(), self.CORRECT_CSV.strip())",
"def test_itercsv_emits_data_lines():\n expected = [\n b'Hello,World\\r\\n',\n b'1,2\\r\\n',\n b'3,4\\r\\n'\n ]\n assert list(itercsv(['Hello', 'World'], [[1, 2], [3, 4]])) == expected",
"def test_itercsv_always_emits_headers():\n assert list(itercsv(['Hello', 'World'], [])) == [b'Hello,World\\r\\n']",
"def test_csv(inpath, outpath, line_width=0):\n test = SimpleCSVReporter.SimpleCSVReporter()\n test.readCSV(inpath)\n indent_tool = IndentMessages.IndentMessages()\n if line_width > 0:\n indent_tool.total_width = line_width\n output = open(outpath, 'w')\n test.report_fd = output\n test.indenter = indent_tool\n test.default_report()\n output.close()",
"def test_return_csv_string(self):\n\n csv_formatter = CSVFormatter(fmt_str=\"${aaa},${bbb},${ccc}\", header=\"# Custom header line\")\n\n csv_expected = textwrap.dedent(\"\"\"\\\n # Custom header line\n foobar_01,8,4898FE19\n foobar_02,160,5825D187\n foobar_03,99,3648A436\n \"\"\")\n\n csv = csv_formatter.to_csv(self.records, path_or_buf=None)\n assert csv == csv_expected",
"def test_simple_export(self):\n\n self.import_file(\"assessment_full_no_warnings.csv\")\n data = [{\n \"object_name\": \"Assessment\",\n \"filters\": {\n \"expression\": {}\n },\n \"fields\": \"all\",\n }]\n response = self.export_csv(data)\n self.assertIn(u\"\\u5555\", response.data.decode(\"utf8\"))",
"def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response",
"def create_test_csv():\n if os.path.exists(args.test):\n print(\"--Traffic input for analysis found: \", args.test)\n #quick and dirty create csv file\n headers = os.system(\"echo idorigh,idresph,origbytes,respbytes,origpkts,resppkts,duration > test.csv\")\n brocut = os.system(\"cat \"+str(args.test)+\"| bro-cut id.orig_h id.resp_h orig_bytes resp_bytes orig_pkts resp_pkts duration | sed 's/\t/\\,/g' | sed '/-/d'>> test.csv\")\n \n else:\n print(\"Bro testing data input \"+str(args.test)+\" not found - needs to be in working directory\")\n exit()",
"def test_csvfile_different_types(fs: FakeFilesystem) -> None:\n contents = '''\"a\"\n1\n2.0\n\"test\"'''\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": String(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }",
"def test_to_csv_with_valid_rows(self, mock_open):\n row_handling.to_csv(rows=self.rows, csv_path=self.csv_path)\n open.assert_called_with(self.csv_path, 'w')",
"def test_valid_csv(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_csv(url, fname='wine2')\n self.assertTrue(str(type(readerobject)),\"_csv.reader\")",
"def test_add_csv_data_00(self, mocker):\n fake_fields = self.fake.pylist(10, True, str)\n fake_data = []\n for _ in range(self.fake.random_digit()):\n fake_entry = {}\n for field in fake_fields:\n fake_entry[field] = self.fake.word()\n fake_data.append(fake_entry)\n\n g = GSheets(self.fake.file_path(depth=1, category=None, extension='json'), [])\n g.spreadsheet = Spreadsheet(None, None)\n g.worksheet = mocker.MagicMock()\n g.worksheet.append_row = mocker.MagicMock()\n g.add_csv_data(fake_fields, fake_data)\n\n assert not g.worksheet.append_row.call_count == len(fake_data)",
"def test_write_csv_file(self, tmpdir):\n filename = tmpdir.join(\"output.csv\").strpath\n\n csv_formatter = CSVFormatter(fmt_str=\"${aaa},${bbb},${ccc}\", header=\"# Custom header line\")\n csv_formatter.to_csv(self.records, path_or_buf=filename)\n\n csv = open(filename).read()\n csv_expected = textwrap.dedent(\"\"\"\\\n # Custom header line\n foobar_01,8,4898FE19\n foobar_02,160,5825D187\n foobar_03,99,3648A436\n \"\"\")\n\n assert csv == csv_expected",
"def test_csv(transactional_db, client, auth_client, restricted_case, unrestricted_case, elasticsearch):\n content_type = 'text/csv'\n case_text = \"Opinion text\"\n restricted_url = api_reverse(\"cases-detail\", args=[restricted_case.id])\n unrestricted_url = api_reverse(\"cases-detail\", args=[unrestricted_case.id])\n list_url = api_reverse(\"cases-list\")\n\n # unauthorized request can't fetch restricted CSV\n response = client.get(restricted_url, {\"full_case\": \"true\", \"format\": \"csv\"})\n check_response(response, content_excludes=case_text, content_type=content_type)\n\n # authorized request can fetch restricted CSV\n response = auth_client.get(restricted_url, {\"full_case\": \"true\", \"format\": \"csv\"})\n check_response(response, content_includes=case_text, content_type=content_type)\n\n # both can fetch unrestricted CSV\n response = client.get(unrestricted_url, {\"full_case\": \"true\", \"format\": \"csv\"})\n check_response(response, content_includes=case_text, content_type=content_type)\n response = auth_client.get(unrestricted_url, {\"full_case\": \"true\", \"format\": \"csv\"})\n check_response(response, content_includes=case_text, content_type=content_type)\n\n # ?format=csv works on list page\n response = auth_client.get(list_url, {\"full_case\": \"true\", \"format\": \"csv\"})\n check_response(response, content_type=content_type)\n content = response.content.decode()\n rows = DictReader(StringIO(content))\n assert set(row['name_abbreviation'] for row in rows) == set(CaseMetadata.objects.values_list('name_abbreviation', flat=True))\n\n # text/plain error display\n response = auth_client.get(list_url, {\"full_case\": \"invalid\", \"format\": \"csv\"})\n check_response(response, status_code=400, content_type=\"text/plain\", content_includes=\"Select a valid choice\")",
"def test_value_patterns(self):\n f = tempfile.NamedTemporaryFile(delete=False)\n dfx.datasets.employees.to_csv(f.name, index=False)\n dfx.main(['dfx', f.name], print_func=self.print)\n expected=\"\"\"employee_id : id, num_normal, num long tail\nregion : categorical, flag\nstate : categorical\nsalary : num_normal, num long tail\ncompany : categorical\nmanager_id : categorical, num_normal\n \"\"\"\n # ignore first line of output\n actual=\"\\n\".join(self.output.split('\\n')[1:])\n self.assertEqual(actual, expected)",
"def test_csvfile_get_data_impossible_filter(fs: FakeFilesystem) -> None:\n fs.create_file(\"test.csv\", contents=CONTENTS)\n\n adapter = CSVFile(\"test.csv\")\n assert list(adapter.get_data({\"index\": Impossible()}, [])) == []",
"def test_case():\n\n input_csv_file_path = 'data/sample_data.csv'\n output_csv_file_path = 'data/result_data.csv'\n generate_html(input_file=input_csv_file_path,\n output_file=output_csv_file_path)\n\n with open(output_csv_file_path, 'r') as file:\n csv_file = csv.DictReader(file)\n\n # assert the col titles in output file\n assert csv_file.fieldnames == ['assessment_id', 'html']\n\n # assert the row count in output file\n row_count = sum(1 for row in csv_file)\n assert row_count == 3",
"def test_get_students_features_csv(self):\r\n url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url + '/csv', {})\r\n self.assertEqual(response['Content-Type'], 'text/csv')",
"def generate_data(self, file_name, data, header=None):\n with open(f'{self.path_file}/{file_name}.csv', 'w') as csvfile:\n if header:\n csvfile.write(header)\n csvfile.writelines(data)\n return True",
"def test_csvfile_empty(fs: FakeFilesystem) -> None:\n fs.create_file(\"test.csv\", contents=\"\")\n\n with pytest.raises(ProgrammingError) as excinfo:\n CSVFile(\"test.csv\")\n assert str(excinfo.value) == \"The file has no rows\""
]
| [
"0.77176857",
"0.74230194",
"0.73120224",
"0.7183386",
"0.71010906",
"0.7078166",
"0.7026054",
"0.69418746",
"0.6898602",
"0.6708954",
"0.66887707",
"0.6599895",
"0.6592151",
"0.65827006",
"0.6554669",
"0.65479755",
"0.6535834",
"0.6514693",
"0.65141994",
"0.65083003",
"0.65041417",
"0.6482287",
"0.6480506",
"0.64626443",
"0.6449339",
"0.6425798",
"0.6416088",
"0.63952726",
"0.63671356",
"0.6351786"
]
| 0.7783978 | 0 |
Test that unicode cell values are converted correctly to csv. | def test_csv_with_unicode(self):
data = [
{
'name': 'Normal string',
'item_num': 1,
},
{
'name': u'String with ' + unichr(0x16c) + ' char',
'item_num': 2,
},
]
table = TableReportForTesting(data)
response = table.as_csv(HttpRequest())
self.assertEqual(response.status_code, 200)
# Expect csv content to be utf-8 encoded.
content = response.content
result = ('Name,Item Num\r\n'
'Normal string,1\r\n'
'String with ' + unichr(0x16c) + ' char,2\r\n')
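        # Compare decoded text on Python 3; on Python 2 compare against the encoded bytes instead.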
if PY3:
content = content.decode(settings.DEFAULT_CHARSET).replace('\x00', '')
else:
result = result.encode(settings.DEFAULT_CHARSET)
self.assertEqual(content, result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_unicode_with_csv():\n data = [['观音', '1'], ['Ποσειδῶν', '456']]\n headers = ['letters', 'number']\n output = delimited_output_adapter.adapter(data, headers)\n assert \"\\n\".join(output) == dedent('''\\\n letters,number\\n\\\n 观音,1\\n\\\n Ποσειδῶν,456''')",
"def unicode_csv_reader(data, **kwargs):\r\n\tdata_file = csv.reader(data, **kwargs)\r\n\tfor row in data_file:\r\n\t\tyield [str(cell) for cell in row]",
"def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):\n csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)\n for row in csv_reader:\n yield [unicode(cell, 'utf-8') for cell in row]",
"def test_csv_simple_input(self):\n\n # Mix of integer and string data. Ensure that commas and\n # quotes are escaped properly.\n data = [\n {\n 'name': 'Normal string',\n 'item_num': 1,\n },\n {\n 'name': 'String, with, commas',\n 'item_num': 2,\n },\n {\n 'name': 'String with \" quote',\n 'item_num': 3,\n },\n ]\n\n table = TableReportForTesting(data)\n response = table.as_csv(HttpRequest())\n self.assertEqual(response.status_code, 200)\n # Expect cells containing commas to be escaped with quotes.\n content = response.content\n if PY3:\n content = content.decode(settings.DEFAULT_CHARSET).replace('\\x00', '')\n self.assertEqual(\n content,\n 'Name,Item Num\\r\\n'\n 'Normal string,1\\r\\n'\n '\"String, with, commas\",2\\r\\n'\n '\"String with \"\" quote\",3\\r\\n')",
"def test_UnicodeWriter(self):\r\n tmp = tempfile.NamedTemporaryFile()\r\n uw = pybossa.util.UnicodeWriter(tmp)\r\n fake_csv = ['one, two, three, {\"i\": 1}']\r\n for row in csv.reader(fake_csv):\r\n # change it for a dict\r\n row[3] = dict(i=1)\r\n uw.writerow(row)\r\n tmp.seek(0)\r\n err_msg = \"It should be the same CSV content\"\r\n with open(tmp.name, 'rb') as f:\r\n reader = csv.reader(f)\r\n for row in reader:\r\n for item in row:\r\n assert item in fake_csv[0], err_msg",
"def test_csvfile_different_types(fs: FakeFilesystem) -> None:\n contents = '''\"a\"\n1\n2.0\n\"test\"'''\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": String(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }",
"def test_simple_export(self):\n\n self.import_file(\"assessment_full_no_warnings.csv\")\n data = [{\n \"object_name\": \"Assessment\",\n \"filters\": {\n \"expression\": {}\n },\n \"fields\": \"all\",\n }]\n response = self.export_csv(data)\n self.assertIn(u\"\\u5555\", response.data.decode(\"utf8\"))",
"def to_csv(val):\n # Make sure all individual values do not contain\n # leading or trailing whitespaces.\n unicode_values = list(map(str.strip, map(str, val)))\n stream = StringIO()\n writer = csv.writer(stream, dialect=\"excel\")\n writer.writerow(unicode_values)\n # Strip any csv.writer added carriage return line feeds\n # and double quotes before saving.\n csv_string = stream.getvalue().strip().strip('\"')\n if len(unicode_values) > 1:\n csv_string = \"[\" + csv_string + \"]\"\n return csv_string",
"def write_csv(d, f):\n with open(f, 'w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(d[0])\n for row in d[1]:\n row_encode = list()\n for x in row:\n if type(x) == unicode:\n row_encode.append(x.encode('utf8'))\n else:\n row_encode.append(x)\n writer.writerow(row_encode)\n return True",
"def test_purchased_csv(self):\r\n report = initialize_report(\"itemized_purchase_report\", self.now - self.FIVE_MINS, self.now + self.FIVE_MINS)\r\n csv_file = StringIO.StringIO()\r\n report.write_csv(csv_file)\r\n csv = csv_file.getvalue()\r\n csv_file.close()\r\n # Using excel mode csv, which automatically ends lines with \\r\\n, so need to convert to \\n\r\n self.assertEqual(csv.replace('\\r\\n', '\\n').strip(), self.CORRECT_CSV.strip())",
"def test_all_text_type(extra_kwargs):\n data = [[1, \"\", None, Decimal(2)]]\n headers = [\"col1\", \"col2\", \"col3\", \"col4\"]\n output_formatter = TabularOutputFormatter()\n for format_name in output_formatter.supported_formats:\n for row in output_formatter.format_output(\n iter(data), headers, format_name=format_name, **extra_kwargs\n ):\n assert isinstance(row, text_type), \"not unicode for {}\".format(format_name)",
"def test_return_csv_string(self):\n\n csv_formatter = CSVFormatter(fmt_str=\"${aaa},${bbb},${ccc}\", header=\"# Custom header line\")\n\n csv_expected = textwrap.dedent(\"\"\"\\\n # Custom header line\n foobar_01,8,4898FE19\n foobar_02,160,5825D187\n foobar_03,99,3648A436\n \"\"\")\n\n csv = csv_formatter.to_csv(self.records, path_or_buf=None)\n assert csv == csv_expected",
"def test_write_csv_file(self, tmpdir):\n filename = tmpdir.join(\"output.csv\").strpath\n\n csv_formatter = CSVFormatter(fmt_str=\"${aaa},${bbb},${ccc}\", header=\"# Custom header line\")\n csv_formatter.to_csv(self.records, path_or_buf=filename)\n\n csv = open(filename).read()\n csv_expected = textwrap.dedent(\"\"\"\\\n # Custom header line\n foobar_01,8,4898FE19\n foobar_02,160,5825D187\n foobar_03,99,3648A436\n \"\"\")\n\n assert csv == csv_expected",
"def test_utf8_cp1252_char_file(self):\n\t\tmain.Main(['input/utf8.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/utf8.csv'))",
"def test_itercsv_emits_data_lines():\n expected = [\n b'Hello,World\\r\\n',\n b'1,2\\r\\n',\n b'3,4\\r\\n'\n ]\n assert list(itercsv(['Hello', 'World'], [[1, 2], [3, 4]])) == expected",
"def unicode_csv_reader(utf8_data, **kwargs):\r\n\r\n csv_reader = csv.DictReader(utf8_data, **kwargs)\r\n for row in csv_reader:\r\n yield {unicode(key, 'utf-8'): unicode(value, 'utf-8') for key, value in row.iteritems()}",
"def csv_parser_test():\r\n data = csv_parser(myspreadsheet)\r\n print('Your data object:')\r\n pp = pprint.PrettyPrinter(indent=4)\r\n pp.pprint(data) \r\n # Did your parser work?\r\n for row_num, row in enumerate(data):\r\n try:\r\n assert len(row) == 3\r\n except AssertionError:\r\n print ((\"Row %s seems to be misparsed; its length is %s\") % (row_num, len(row)))\r\n # Check on one of the values:\r\n try:\r\n assert data[4][2] == 'Linguist'\r\n except AssertionError:\r\n print ((\"Error: data[4][2] should equal 'Linguist'; actual value is %s\") % data[4][2])\r\n # Did you remember your int conversions?\r\n try:\r\n assert isinstance(data[0][0], int)\r\n except AssertionError:\r\n print (\"Error: data[0][0] should be an int\")\r\n # Did you remember your float conversions?\r\n try:\r\n assert isinstance(data[6][1], float)\r\n except AssertionError:\r\n print (\"Error: data[6][1] should be a float\")",
"def test_valid_csv(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_csv(url, fname='wine2')\n self.assertTrue(str(type(readerobject)),\"_csv.reader\")",
"def csv_parser_test():\n data = csv_parser(myspreadsheet)\n print 'Your data object:'\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(data) \n # Did your parser work?\n for row_num, row in enumerate(data):\n try:\n assert len(row) == 3\n except AssertionError:\n print \"Row %s seems to be misparsed; its length is %s\" % (row_num, len(row))\n # Check on one of the values:\n try:\n assert data[4][2] == 'Linguist'\n except AssertionError:\n print \"Error: data[4][2] should equal 'Linguist'; actual value is %s\" % data[4][2]\n # Did you remember your int conversions?\n try:\n assert isinstance(data[0][0], int)\n except AssertionError:\n print \"Error: data[0][0] should be an int\"\n # Did you remember your float conversions?\n try:\n assert isinstance(data[6][1], float)\n except AssertionError:\n print \"Error: data[6][1] should be a float\"",
"def _unicode_rows(stream, encoding, dialect, **kwds):\n # Get Unicode stream.\n if isinstance(stream, io.IOBase):\n streamreader_type = codecs.getreader(encoding)\n unicode_stream = streamreader_type(stream)\n elif isinstance(stream, Iterable):\n first_row, stream = iterpeek(stream)\n if isinstance(first_row, unicode):\n unicode_stream = stream # Ignores given *encoding*.\n else:\n unicode_stream = (row.decode(encoding) for row in stream)\n else:\n cls_name = stream.__class__.__name__\n raise TypeError('unsupported type {0}'.format(cls_name))\n\n # Re-encode as UTF-8.\n utf8_stream = (x.encode('utf-8') for x in unicode_stream)\n\n # Pass to csv.reader() and return generator.\n reader = csv.reader(utf8_stream, dialect=dialect, **kwds)\n make_unicode = lambda row: [unicode(s, 'utf-8') for s in row]\n return (make_unicode(row) for row in reader)",
"def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'",
"def _get_utf8_encoded_rows(row):\n\n if six.PY2:\n return [six.text_type(item).encode('utf-8') for item in row]\n else:\n return [six.text_type(item) for item in row]",
"def write_csv(self, filelike):\r\n items = self.rows()\r\n writer = unicodecsv.writer(filelike, encoding=\"utf-8\")\r\n writer.writerow(self.header())\r\n for item in items:\r\n writer.writerow(item)",
"def test_csvfile_unordered(fs: FakeFilesystem) -> None:\n contents = \"\"\"\"a\"\n1\n2\n1\"\"\"\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }",
"def test_doubled_quotes(read_csv):\n tbl = \"\\n\".join( # noqa: FLY002\n [\n \"a,b\",\n '\"d\"\"\",\"d\"\"q\"',\n '\"\"\"q\",\"\"\"\"',\n ]\n )\n # fmt: off\n expected = Table([['d\"', '\"q'],\n ['d\"q', '\"']],\n names=('a', 'b'))\n # fmt: on\n dat = read_csv(tbl)\n assert_table_equal(dat, expected)\n\n # In addition to the local read_csv wrapper, check that default\n # parsing with guessing gives the right answer.\n for fast_reader in True, False:\n dat = ascii.read(tbl, fast_reader=fast_reader)\n assert_table_equal(dat, expected)",
"def test_csv_writes(self):\n counter = testdata.get_counter()\n csvfile = testdata.create_csv({\n \"foo\": counter,\n \"bar\": testdata.get_words,\n })\n\n for row in csvfile:\n for k in [\"foo\", \"bar\"]:\n self.assertTrue(k in row)\n self.assertTrue(row[k])",
"def test_refund_report_purchased_csv(self):\r\n report = initialize_report(\"refund_report\", self.now - self.FIVE_MINS, self.now + self.FIVE_MINS)\r\n csv_file = StringIO.StringIO()\r\n report.write_csv(csv_file)\r\n csv = csv_file.getvalue()\r\n csv_file.close()\r\n # Using excel mode csv, which automatically ends lines with \\r\\n, so need to convert to \\n\r\n self.assertEqual(csv.replace('\\r\\n', '\\n').strip(), self.CORRECT_REFUND_REPORT_CSV.strip())",
"def test_csv_row_bug(script_runner, tmpdir, test_dir):\n csv_file = tmpdir / 'csv_file.csv'\n\n ret = script_runner.run(\n 'mwcp-tool', '-p', 'foo', '-c', str(csv_file), str(test_dir), cwd=str(tmpdir))\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n assert ret.success\n\n assert csv_file.exists()\n\n with csv_file.open('r') as fo:\n reader = csv.reader(fo)\n rows = list(reader)\n assert len(rows) == len(test_dir.listdir()) + 1\n assert rows[0] == ['scan_date', 'inputfilename', 'outputfile.name',\n 'outputfile.description', 'outputfile.md5', 'address', 'debug', 'url']\n for i, row in enumerate(rows[1:]):\n assert row[0] and row[1]\n # Test entries except the timestamp and full file path.\n assert row[2:] == [\n 'fooconfigtest.txt',\n 'example output file',\n '5eb63bbbe01eeed093cb22bb8f5acdc3',\n '127.0.0.1',\n ('[+] File test_{0}.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] operating on inputfile test_{0}.txt').format(i),\n 'http://127.0.0.1',\n ]",
"def test_csv_wrapper():\n # Test comma-delimited output.\n data = [['abc', '1'], ['d', '456']]\n headers = ['letters', 'number']\n output = delimited_output_adapter.adapter(iter(data), headers)\n assert \"\\n\".join(output) == dedent('''\\\n letters,number\\n\\\n abc,1\\n\\\n d,456''')\n\n # Test tab-delimited output.\n data = [['abc', '1'], ['d', '456']]\n headers = ['letters', 'number']\n output = delimited_output_adapter.adapter(\n iter(data), headers, table_format='tsv')\n assert \"\\n\".join(output) == dedent('''\\\n letters\\tnumber\\n\\\n abc\\t1\\n\\\n d\\t456''')\n\n with pytest.raises(ValueError):\n output = delimited_output_adapter.adapter(\n iter(data), headers, table_format='foobar')\n list(output)",
"def test_dummy(self, data):\r\n source, expected = data\r\n result = self.converter.convert(source)\r\n self.assertUnicodeEquals(result, expected)"
]
| [
"0.8036314",
"0.6922067",
"0.69172096",
"0.69118446",
"0.6771992",
"0.6746358",
"0.6688988",
"0.6558301",
"0.6473094",
"0.6323869",
"0.6294369",
"0.6276984",
"0.62547374",
"0.62028056",
"0.6178271",
"0.61072475",
"0.60752225",
"0.6039979",
"0.600815",
"0.5966141",
"0.59431046",
"0.591119",
"0.59046924",
"0.5903771",
"0.5867852",
"0.5843502",
"0.5843207",
"0.5841782",
"0.5834094",
"0.580425"
]
| 0.84924006 | 0 |
Ensure that excluding some columns from the report works. | def test_exclude_from_report(self):
data = [
{
'name': 'page 1',
'item_num': 1,
},
{
'name': 'page 2',
'item_num': 2,
},
]
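        # Table that keeps 'item_num' visible on screen but drops it from the generated report.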
class TableWithExclude(TableReportForTesting):
class Meta:
exclude_from_report = ('item_num',)
table = TableWithExclude(data)
table.exclude = ('name', )
self.assertEqual(table.exclude_from_report, ('item_num',))
response = table.as_csv(HttpRequest())
self.assertEqual(response.status_code, 200)
content = response.content
if PY3:
content = content.decode(settings.DEFAULT_CHARSET).replace('\x00', '')
self.assertEqual(table.exclude, ('name',)) # Attribute 'exclude_from_report' shouldn't overwrite 'exclude'
self.assertEqual(
content,
('Name\r\n'
'page 1\r\n'
'page 2\r\n')
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exclude_cols(self, *_, **__) -> Tuple[str, ...]:",
"def _these_columns_cannot_annotate_exp_cons(self):\n _cols = set([]) #\n for param_name, req_cols in self.required_columns.items():\n _cols |= req_cols\n\n return _cols | self.other_useful_columns",
"def test_overall_report_columns():\n assert (len(overall_data['columns']) == 31)",
"def exclude_columns():\r\n # Table(..., exclude=...)\r\n table = UnorderedTable([], exclude=(\"i\"))\r\n assert [c.name for c in table.columns] == [\"alpha\", \"beta\"]\r\n\r\n # Table.Meta: exclude=...\r\n class PartialTable(UnorderedTable):\r\n class Meta:\r\n exclude = (\"alpha\", )\r\n table = PartialTable([])\r\n assert [c.name for c in table.columns] == [\"i\", \"beta\"]\r\n\r\n # Inheritence -- exclude in parent, add in child\r\n class AddonTable(PartialTable):\r\n added = tables.Column()\r\n table = AddonTable([])\r\n assert [c.name for c in table.columns] == [\"i\", \"beta\", \"added\"]\r\n\r\n # Inheritence -- exclude in child\r\n class ExcludeTable(UnorderedTable):\r\n added = tables.Column()\r\n class Meta:\r\n exclude = (\"beta\", )\r\n table = ExcludeTable([])\r\n assert [c.name for c in table.columns] == [\"i\", \"alpha\", \"added\"]",
"def test_optional_cols():\n extract_config_dir = os.path.join(\n settings.BASE_DIR, \"extract_configs\", \"templates\"\n )\n for ft, obj in FILE_TYPES.items():\n ec_file = obj[\"template\"]\n required_cols = obj[\"required_columns\"]\n if not ec_file:\n continue\n\n ec_path = os.path.join(extract_config_dir, ec_file)\n print(f\"Testing extract config: {ec_path}\")\n assert os.path.exists(ec_path)\n\n # Drop columns that are not required\n df = make_template_df(ft)[required_cols]\n\n Extractor().extract(df, ec_path)",
"def _dataframe_column_check(df: DataFrame, compulsory_columns: Sequence) -> None:\n if not set(compulsory_columns).issubset(df.columns):\n diff = set(compulsory_columns).difference(df.columns)\n msg = (\n \"The following compulsory column(s) are missing from the \"\n f\"DataFrame: {diff}\"\n )\n raise ValueError(msg)",
"def test_column_presence(self):\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )",
"def test_clean_columns():\n assert clean_columns('Id, AdCampaignId, CampaignId') == ['id', 'adCampaignId', 'campaignId']",
"def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols",
"def ignored_columns(self):\n return self._parms.get(\"ignored_columns\")",
"def check_cols(self):\n if self.ad_tab is not None and 'date' not in self.ad_cols:\n raise DataException(\"\"\"date column not found in adServer table.\"\"\")\n if self.ad_tab is not None and 'impressions' not in self.ad_cols:\n raise DataException(\"\"\"impressions column not found in adServer table.\"\"\")\n if 'timestamp' not in self.log_cols and 'date' not in self.log_cols:\n raise DataException(\"\"\"Both timestamp and date column missing from {t}\nCannot do dailyQA\"\"\".format(t=self.log_tab))\n if self.configs['hourshift'] != 0 or 'date' not in self.log_cols:\n if 'timestamp' not in self.log_cols:\n raise DataException(\"\"\"Time shift requested \\\nbut no timestamp column in {t}.\"\"\".format(t=self.log_tab))\n else:\n check_timestamp(self.configs['schema'], self.log_tab)",
"def check_dataframe_columns(df):\r\n if len(set(df.columns).intersection(\r\n set([constants.CASE_CONCEPT_NAME, xes_constants.DEFAULT_NAME_KEY,\r\n xes_constants.DEFAULT_TIMESTAMP_KEY]))) < 3:\r\n raise Exception(\r\n \"please format your dataframe accordingly! df = pm4py.format_dataframe(df, case_id='<name of the case ID column>', activity_key='<name of the activity column>', timestamp_key='<name of the timestamp column>')\")",
"def test_method_excludes(self):\n self.assertContains(self.response, \"<td>get_full_name</td>\")\n self.assertNotContains(self.response, \"<td>_get_full_name</td>\")\n self.assertNotContains(self.response, \"<td>add_image</td>\")\n self.assertNotContains(self.response, \"<td>delete_image</td>\")\n self.assertNotContains(self.response, \"<td>set_status</td>\")\n self.assertNotContains(self.response, \"<td>save_changes</td>\")",
"def _non_listed_ea_columns_check():\n for ea_row in unused_list:\n # dup Check in disposition\n ddi_index = views_index[ea_row[15]]\n for key, value in ea_index.items():\n # ea attributes that could be listed.\n if key == 'Datacenter' or key == 'IPR Designation':\n continue\n # Checks for empty src value and empty ddi data value.\n # Continues if True.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Checks a non-empty src value and updates if an\n # empty ddi data value.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ea_row[value] not in ['', 'DDI']:\n import_merge.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if ea_row[value] != \\\n ddi_data[ddi_index][\n ea_row[1]]['extattrs'][key]['value']:\n import_override.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue",
"def remove_urequired_columns(self, unrequired_columns):\n self.df = self.df.drop(columns=unrequired_columns)",
"def test_quoted_column_names_reading_dont_specify_names(self):\n self.quoted_column_names_reading_template(specify_column_names=False)",
"def _listed_ea_column_check():\n for ea_row in unused_list:\n ddi_index = views_index[ea_row[15]]\n # This check is performed in\n # _ea_in_disposition_col0_and_empty_ipr_d_col\n if ea_row[0] in ea_ipr_d_values and \\\n 'IPR Designation' not in \\\n ddi_data[ddi_index][ea_row[1]]['extattrs']:\n continue\n # Update IPR D src column with ea_row[0] for processing.\n # WORK IN PROGRESS\n elif ea_row[0] in ea_ipr_d_values and 'IPR Designation' \\\n in ddi_data[ddi_index][ea_row[1]]['extattrs']:\n pass\n # Processing listable columns.\n for key, value in ea_index.items():\n # Skip's unused keys.\n if key not in ['Datacenter', 'IPR Designation']:\n continue\n # Check for blank column and blank source column.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Check for Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n if key == 'IPR Designation':\n if ea_row[0] in ea_ipr_d_values \\\n and ',' not in ea_row[16] \\\n and ea_row[16] in ea_ipr_d_values:\n ea_row[16] = ea_row[16] + ',' + ea_row[0]\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[16]}])\n continue\n # Check for Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n elif ea_row[0] in ea_ipr_d_values \\\n and ',' not in ea_row[16] \\\n and ea_row[16] not in ea_ipr_d_values:\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[0]}])\n continue\n# # Check Disposition col. and if IPR D listed value needs\n# # updating. On listed IPR D values.\n# if ea_row[0].lower().strip() in ea_ipr_d_values \\\n# and ',' in ea_row[16]:\n# temp_list = ea_row[16].split(',')\n# temp_list = [x.strip() for x in temp_list]\n# if ea_row[0].lower().strip() in temp_list:\n# continue\n# else:\n# temp_list.append(ea_row[0].lower().strip())\n# temp_dict_override.update({key: temp_list})\n# import_override.append([ea_row[15].strip(),\n# ea_row[1].strip(),\n# ea_row[14].strip(),\n# temp_dict_override])\n# continue\n\n # Builds dataset for non-listed values. Final Step.\n # If key not in ddi data and src value is not none.\n # Assign to merge.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ea_row[value] not in ['', 'DDI']:\n import_merge.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[value]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if ea_row[value] != \\\n ddi_data[ddi_index][\n ea_row[1]]['extattrs'][key]['value']:\n import_override.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue",
"def get_column_excluded(self):\n return self.column_excluded or []",
"def get_cols_drop():",
"def validate_columns(self, fieldnames, dao):\n unstored_columns = ['blank']\n expected_columns = dao.model_type.__table__.columns.keys() + unstored_columns\n for column_name in fieldnames:\n if column_name not in expected_columns:\n raise AttributeError(f\"{self.file_path}: {column_name} column mismatch for \"\n f\"expected file type: {self.file_type.name}\")",
"def _check_missing_columns(self, df: pd.DataFrame) -> None:\n if any([c not in df.columns for c in REQUIRED_COLUMNS]):\n raise ValueError(\"Missing columns in dataset.\"\n f\"Columns: {df.columns}\"\n f\"Required: {REQUIRED_COLUMNS}\")",
"def remove_bad_columns(df):\n columns = []\n if 'job_runner_name' in df.columns:\n columns.append('job_runner_name')\n\n if 'handler' in df.columns:\n columns.append('handler')\n\n if 'destination_id' in df.columns:\n columns.append('destination_id')\n\n if 'input_file' in df.columns:\n columns.append('input_file')\n\n for column in columns:\n del df[column]\n\n return df",
"def test_collected_columns_no_table_two_col_from_two(self):\n col_double, allow_colspan = False, False # as_type != 'table'\n col_args = ('span', False, {})\n names, multi_field_row = ('first', 'billing_address_1'), True\n row = {name: self.form.fields[name] for name in names}\n col_count = 2\n expected = [self.form.collect_col_data(name, self.form.fields[name], *col_args) for name in names]\n for ea in expected:\n if multi_field_row:\n ea['css_classes'] = ' '.join(['nowrap', ea['css_classes']])\n ea['html_head_attr'] = ' class=\"nowrap\"'\n val = ea.pop('css_classes', '')\n val = ' class=\"%s\"' % val if val else ''\n ea['html_col_attr'] = val\n col_settings = (multi_field_row, col_count, col_double, allow_colspan)\n actual = self.form.collect_columns(row, col_settings, *col_args)\n\n self.assertEqual(len(expected), len(actual))\n for expect, got in zip(expected, actual):\n self.assertEqual(len(expect), len(got))\n self.assertListEqual(list(expect.keys()), list(got.keys()))\n self.assertListEqual(list(expect.values()), list(got.values()))\n self.assertEqual(expected, actual)",
"def test_extract_multiple_invalid_columns(self):\n keywords = [\"invalid\", \"another_invalid_col\"]\n self.dicom.extract_keywords(keywords)\n\n # test that columns were added\n columns = self.dicom.metadata.column_names\n if u'invalid' not in columns:\n raise Exception(\"invalid column not added to columns\")\n if u'another_invalid_col' not in columns:\n raise Exception(\"another_invalid_col not added to columns\")\n\n # compare actual with expected result\n invalid_columns = self.dicom.metadata.take(self.count, columns=keywords)\n expected_result = [[None, None] for x in range(0, self.count)]\n self.assertEqual(invalid_columns, expected_result)",
"def test_split_reports_no_execution(self):\n self._test_reports_helper({\"--split-reports\": \"\"}, [\"compile.txt\"])",
"def test_columns_not_in_raw_var(self):\n\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"],\n )",
"def check_and_invert(columns, excluded):\n if isinstance(excluded, str):\n excluded = [excluded]\n\n included = columns.tolist()\n for exclude in excluded:\n if exclude in included:\n included.remove(exclude)\n return included",
"def test_missing_trial_type_column_warning(tmp_path_factory):\n bids_dataset = _new_bids_dataset(\n tmp_path_factory.mktemp(\"one_event_missing\")\n )\n events_files = get_bids_files(main_path=bids_dataset, file_tag=\"events\")\n # remove trial type column from one events.tsv file\n events = pd.read_csv(events_files[0], sep=\"\\t\")\n events.drop(columns=\"trial_type\", inplace=True)\n events.to_csv(events_files[0], sep=\"\\t\", index=False)\n\n with pytest.warns() as record:\n first_level_from_bids(\n dataset_path=bids_dataset, task_label=\"main\", space_label=\"MNI\",\n slice_time_ref=None,\n )\n assert (any(\"No column named 'trial_type' found\" in r.message.args[0]\n for r in record))",
"def exclude(self, *args, **kwargs):",
"def test_remove_invalid_reports(self):\n\n this_new_table = tornado_io.remove_invalid_reports(\n copy.deepcopy(TORNADO_TABLE_WITH_INVALID_ROWS)\n )\n\n self.assertTrue(this_new_table.equals(TORNADO_TABLE_NO_INVALID_ROWS))"
]
| [
"0.664114",
"0.6371519",
"0.61428374",
"0.6074265",
"0.60691434",
"0.5899227",
"0.5741733",
"0.5681908",
"0.56517905",
"0.5642689",
"0.5597826",
"0.5576453",
"0.5570386",
"0.55462635",
"0.5531787",
"0.55221844",
"0.550766",
"0.55015415",
"0.5487185",
"0.5482595",
"0.5481908",
"0.54763806",
"0.5473721",
"0.545916",
"0.5458377",
"0.54581255",
"0.54560494",
"0.54303133",
"0.54296654",
"0.54163855"
]
| 0.7021682 | 0 |
Test ability to generate excel output with simple input data. | def test_excel_simple_input(self, extension='xls'):
excel_support = getattr(settings, 'EXCEL_SUPPORT', django_tables2_reports.utils.get_excel_support())
response = self.table.treatement_to_response(
self.table.as_csv(HttpRequest()),
report_format='xls')
self.assertEqual(response.status_code, 200)
open('test-file-%s.%s' % (excel_support, extension),
'wb').write(response.content) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_generate_sample_sheet(self):\n pass",
"def test_export_spreadsheet(self):\r\n client = self.getClient()\r\n if client:\r\n exp = [['#SampleID', 'DOB'],\r\n ['#Example mapping file for the QIIME analysis package. '\r\n 'These 9 samples are from a study of the effects of exercise '\r\n 'and diet on mouse cardiac physiology (Crawford, et al, '\r\n 'PNAS, 2009).'], ['PC.354', '20061218'],\r\n ['PC.355', '20061218'], ['PC.356', '20061126'],\r\n ['PC.481', '20070314'], ['PC.593', '20071210'],\r\n ['PC.607', '20071112'], ['PC.634', '20080116'],\r\n ['PC.635', '20080116'], ['PC.636', '20080116']]\r\n obs = _export_spreadsheet(client, self.spreadsheet_key,\r\n self.worksheet_id, ['#SampleID', 'DOB'])\r\n self.assertEqual(obs, exp)\r\n else:\r\n raise GoogleSpreadsheetConnectionError(\"Cannot execute test \"\r\n \"without an active Internet connection.\")",
"def test_export(self):\n\n # create fake data\n worksheet_name = \"chicken_woot\"\n mock_stores = {\n \"field_list\": [\n \"Company Name\", \"State\", \"City\", \"Trade Area\", \"Population (000)\", \"Per Capita Income ($)\", \"Aggregate Income ($M)\", \"Households (000)\",\n \"< $15K (000)\", \"$15-25K (000)\", \"$25-35K (000)\", \"$35-50K (000)\", \"$50-75K (000)\", \"$75-100K (000)\", \"$100-150K (000)\", \"$150-200K (000)\", \"$200K+ (000)\",\n \"Store ID\", \"Street Number\", \"Street\", \"Suite\", \"Zip Code\", \"Phone Number\", \"Store Opened\", \"Store Closed\", \"Company ID\", \"Trade Area ID\"\n ],\n \"results\": [\n [\"test company 1\", \"state\", \"city\", \"10 Mile Circle\", 142695, 25644, 999999999, 5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000, 13000, 14000, 2, \"street_number\", \"street\", \"suite\", \"zip\", \"phone\", \"2012-01-01\", None, 1, 1],\n [\"test company 1\", \"state\", \"city\", \"10 Mile Circle\", 142695, 25644, 999999999, 5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000, 13000, 14000, 3, \"street_number\", \"street\", \"suite\", \"zip\", \"phone\", \"2012-01-15\", \"2013-01-01\", 1, 2]\n ]\n }\n\n # create various mock objects\n mock_workbook = self.mox.CreateMockAnything()\n mock_sheet = self.mox.CreateMockAnything()\n mock_row = self.mox.CreateMockAnything()\n\n # create exporter\n exporter = ExcelStoreExporter(mock_stores, worksheet_name, self.mock_logger)\n\n # stub various methods/classes\n self.mox.StubOutWithMock(xlwt, \"Workbook\")\n self.mox.StubOutWithMock(exporter, \"_track_max_character\")\n self.mox.StubOutWithMock(exporter, \"_set_auto_widths\")\n\n # ------------- Begin Recording (long) -------------\n\n # create worksheet and workbook\n xlwt.Workbook().AndReturn(mock_workbook)\n mock_workbook.add_sheet(worksheet_name).AndReturn(mock_sheet)\n\n # add all headers (skip those that should be skipped)\n mock_sheet.write(0, 0, \"Company Name\", IsA(xlwt.XFStyle))\n exporter._track_max_character(0, \"Company Name\")\n mock_sheet.write(0, 1, \"State\", IsA(xlwt.XFStyle))\n exporter._track_max_character(1, \"State\")\n mock_sheet.write(0, 2, \"City\", IsA(xlwt.XFStyle))\n exporter._track_max_character(2, \"City\")\n mock_sheet.write(0, 3, \"Trade Area\", IsA(xlwt.XFStyle))\n exporter._track_max_character(3, \"Trade Area\")\n mock_sheet.write(0, 4, \"Population (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(4, \"Population (000)\")\n mock_sheet.write(0, 5, \"Per Capita Income ($)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(5, \"Per Capita Income ($)\")\n mock_sheet.write(0, 6, \"Aggregate Income ($M)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(6, \"Aggregate Income ($M)\")\n mock_sheet.write(0, 7, \"Households (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(7, \"Households (000)\")\n mock_sheet.write(0, 8, \"< $15K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(8, \"< $15K (000)\")\n mock_sheet.write(0, 9, \"$15-25K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(9, \"$15-25K (000)\")\n mock_sheet.write(0, 10, \"$25-35K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(10, \"$25-35K (000)\")\n mock_sheet.write(0, 11, \"$35-50K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(11, \"$35-50K (000)\")\n mock_sheet.write(0, 12, \"$50-75K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(12, \"$50-75K (000)\")\n mock_sheet.write(0, 13, \"$75-100K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(13, \"$75-100K (000)\")\n mock_sheet.write(0, 
14, \"$100-150K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(14, \"$100-150K (000)\")\n mock_sheet.write(0, 15, \"$150-200K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(15, \"$150-200K (000)\")\n mock_sheet.write(0, 16, \"$200K+ (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(16, \"$200K+ (000)\")\n mock_sheet.write(0, 17, \"Street Number\", IsA(xlwt.XFStyle))\n exporter._track_max_character(17, \"Street Number\")\n mock_sheet.write(0, 18, \"Street\", IsA(xlwt.XFStyle))\n exporter._track_max_character(18, \"Street\")\n mock_sheet.write(0, 19, \"Suite\", IsA(xlwt.XFStyle))\n exporter._track_max_character(19, \"Suite\")\n mock_sheet.write(0, 20, \"Zip Code\", IsA(xlwt.XFStyle))\n exporter._track_max_character(20, \"Zip Code\")\n mock_sheet.write(0, 21, \"Phone Number\", IsA(xlwt.XFStyle))\n exporter._track_max_character(21, \"Phone Number\")\n mock_sheet.write(0, 22, \"Store Opened\", IsA(xlwt.XFStyle))\n exporter._track_max_character(22, \"Store Opened\")\n mock_sheet.write(0, 23, \"Store Closed\", IsA(xlwt.XFStyle))\n exporter._track_max_character(23, \"Store Closed\")\n\n # write down all the fields from each row (skip those fields that should be skipped)\n mock_sheet.row(1).AndReturn(mock_row)\n mock_row.set_cell_text(0, \"test company 1\")\n exporter._track_max_character(0, \"test company 1\")\n mock_row.set_cell_text(1, \"state\")\n exporter._track_max_character(1, \"state\")\n mock_row.set_cell_text(2, \"city\")\n exporter._track_max_character(2, \"city\")\n mock_row.set_cell_text(3, \"10 Mile Circle\")\n exporter._track_max_character(3, \"10 Mile Circle\")\n mock_row.set_cell_number(4, 142.695, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(4, 142695)\n mock_row.set_cell_number(5, 25644, exporter.dollar_style)\n exporter._track_max_character(5, 25644)\n mock_row.set_cell_number(6, 999.999999, exporter.dollar_style)\n exporter._track_max_character(6, 999999999)\n mock_row.set_cell_number(7, 5.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(7, 5000)\n mock_row.set_cell_number(8, 6.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(8, 6000)\n mock_row.set_cell_number(9, 7.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(9, 7000)\n mock_row.set_cell_number(10, 8.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(10, 8000)\n mock_row.set_cell_number(11, 9.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(11, 9000)\n mock_row.set_cell_number(12, 10.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(12, 10000)\n mock_row.set_cell_number(13, 11.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(13, 11000)\n mock_row.set_cell_number(14, 12.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(14, 12000)\n mock_row.set_cell_number(15, 13.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(15, 13000)\n mock_row.set_cell_number(16, 14.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(16, 14000)\n mock_row.set_cell_text(17, \"street_number\")\n exporter._track_max_character(17, \"street_number\")\n mock_row.set_cell_text(18, \"street\")\n exporter._track_max_character(18, \"street\")\n mock_row.set_cell_text(19, \"suite\")\n exporter._track_max_character(19, \"suite\")\n mock_row.set_cell_text(20, \"zip\")\n exporter._track_max_character(20, 
\"zip\")\n mock_row.set_cell_text(21, \"phone\")\n exporter._track_max_character(21, \"phone\")\n mock_row.set_cell_text(22, \"2012-01-01\")\n exporter._track_max_character(22, \"2012-01-01\")\n exporter._track_max_character(23, \" \")\n\n # second row\n mock_sheet.row(2).AndReturn(mock_row)\n mock_row.set_cell_text(0, \"test company 1\")\n exporter._track_max_character(0, \"test company 1\")\n mock_row.set_cell_text(1, \"state\")\n exporter._track_max_character(1, \"state\")\n mock_row.set_cell_text(2, \"city\")\n exporter._track_max_character(2, \"city\")\n mock_row.set_cell_text(3, \"10 Mile Circle\")\n exporter._track_max_character(3, \"10 Mile Circle\")\n mock_row.set_cell_number(4, 142.695, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(4, 142695)\n mock_row.set_cell_number(5, 25644, exporter.dollar_style)\n exporter._track_max_character(5, 25644)\n mock_row.set_cell_number(6, 999.999999, exporter.dollar_style)\n exporter._track_max_character(6, 999999999)\n mock_row.set_cell_number(7, 5.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(7, 5000)\n mock_row.set_cell_number(8, 6.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(8, 6000)\n mock_row.set_cell_number(9, 7.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(9, 7000)\n mock_row.set_cell_number(10, 8.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(10, 8000)\n mock_row.set_cell_number(11, 9.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(11, 9000)\n mock_row.set_cell_number(12, 10.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(12, 10000)\n mock_row.set_cell_number(13, 11.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(13, 11000)\n mock_row.set_cell_number(14, 12.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(14, 12000)\n mock_row.set_cell_number(15, 13.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(15, 13000)\n mock_row.set_cell_number(16, 14.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(16, 14000)\n mock_row.set_cell_text(17, \"street_number\")\n exporter._track_max_character(17, \"street_number\")\n mock_row.set_cell_text(18, \"street\")\n exporter._track_max_character(18, \"street\")\n mock_row.set_cell_text(19, \"suite\")\n exporter._track_max_character(19, \"suite\")\n mock_row.set_cell_text(20, \"zip\")\n exporter._track_max_character(20, \"zip\")\n mock_row.set_cell_text(21, \"phone\")\n exporter._track_max_character(21, \"phone\")\n mock_row.set_cell_text(22, \"2012-01-15\")\n exporter._track_max_character(22, \"2012-01-15\")\n mock_row.set_cell_text(23, \"2013-01-01\")\n exporter._track_max_character(23, \"2013-01-01\")\n\n # set auto widths\n exporter._set_auto_widths(mock_sheet)\n\n # ------------- End Recording (long) -------------\n\n\n # replay all\n self.mox.ReplayAll()\n\n # go!\n workbook = exporter.get_excel_workbook()\n\n # make sure workbook is the excel workbook\n self.assertEqual(workbook, mock_workbook)",
"def test_write_data(workbook):\n workbook.write_data(\"example data\")\n assert workbook.get_cell(workbook.get_last_row(), 1) == \"example data\"",
"def generate_excel(structure:dict, output:str):\t\n\n\tstructure_columns = identify_columns(structure)\n\n\tworkbook = xlsxwriter.Workbook(output)\n\tworksheet = workbook.add_worksheet()\n\n\tcol = 0\n\tfor column in structure_columns:\n\t\tworksheet.write(0, col, column)\n\t\tcol += 1\n\n\trow = 1\n\tfor day in structure['data']:\n\t\tfor key in day.keys():\n\t\t\tif isinstance(day[key], list):\n\t\t\t\tworksheet.write(row, structure_columns.index(key), ', '.join(day[key]))\n\t\t\telif isinstance(day[key], dict):\n\t\t\t\tworksheet.write(row, structure_columns.index(key), str(day[key]))\n\t\t\telse:\n\t\t\t\tworksheet.write(row, structure_columns.index(key), day[key])\n\t\trow += 1\n\t\n\tworksheet.freeze_panes(1, 1)\n\tworkbook.close()",
"def test_generate_report():\n # Calling helper function to create data\n data = helper_create_data()\n cat_vars = ['C1', 'C2', 'C3', 'C4']\n num_vars = ['N1', 'N2', 'N3']\n\n # Positive test case: Checking whether the function runs properly or not\n assert eda.generate_report(data, cat_vars, num_vars), \\\n \"Expected True but False returned\"\n\n # Negative test case: Checking whether the function returns False\n # fr wrong output\n assert not eda.generate_report(data, cat_vars, \"String Input\"), \\\n \"Expected False but True returned\"",
"def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)",
"def test_parse_sample_sheet(self):\n pass",
"def test_excel(test_data,tmp_path):\n\n for d in test_data:\n\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n # Write excel file\n excel_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(filename=excel_file)\n assert os.path.isfile(excel_file)\n\n # Read in and make sure it worked.\n new_gpm = gpmap.read_excel(filename=excel_file,wildtype=d[\"wildtype\"])\n conftest.compare_gpmap(gpm,new_gpm)\n\n # Do not give wildtype. Should still work because the wildtype was\n # inferred.\n gpm_read = gpmap.read_excel(filename=excel_file)\n conftest.compare_gpmap(gpm,gpm_read)\n\n # Check ability to read labels back in\n site_labels = [f\"{x}\" for x in range(10,10+len(d[\"wildtype\"]),1)]\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n site_labels=site_labels)\n out_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(out_file)\n\n gpm_read = gpmap.read_excel(out_file)\n\n for i in range(len(gpm_read.site_labels)):\n\n # Skip virtual site_labels added for invariant sites\n if len(d[\"mutations\"][i]) == 1:\n continue\n\n assert gpm_read.site_labels[i] == gpm.site_labels[i]\n\n # Read in with bad wildtype. Should throw warning and then have\n # sequential site labels.\n with pytest.warns(UserWarning):\n gpm_read = gpmap.read_excel(out_file,wildtype=d[\"mutant\"])\n\n assert np.array_equal(gpm_read.site_labels,range(len(d[\"wildtype\"])))",
"def test_to_csv(self):\n sale = SaleFactory(total_value=12347)\n self.assertIn('12347', sale.to_csv())",
"def test_value_patterns(self):\n f = tempfile.NamedTemporaryFile(delete=False)\n dfx.datasets.employees.to_csv(f.name, index=False)\n dfx.main(['dfx', f.name], print_func=self.print)\n expected=\"\"\"employee_id : id, num_normal, num long tail\nregion : categorical, flag\nstate : categorical\nsalary : num_normal, num long tail\ncompany : categorical\nmanager_id : categorical, num_normal\n \"\"\"\n # ignore first line of output\n actual=\"\\n\".join(self.output.split('\\n')[1:])\n self.assertEqual(actual, expected)",
"def test_output(self):\n work_logs = [WorkLog(\"MYB-7\", datetime(2020, 1, 20), 3600, \"René Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 3600, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 18), 5400, \"John Doe\"),\n WorkLog(\"MYB-5\", datetime(2020, 1, 12), 3600, \"John Doe\")]\n\n issue_myb_5 = Issue(10005, \"MYB-5\", \"Summary of issue MYB-5\", \"MYB-3\", \"Summary of the parent issue of MYB-5\", 3600, 900, datetime(2020, 1, 15))\n issue_myb_5.issue_start_date = datetime(2020, 1, 10)\n issue_myb_7 = Issue(10007, \"MYB-7\", \"Summary of issue MYB-7\", None, None, None, None, None)\n\n issues = [issue_myb_5,\n issue_myb_7]\n\n stdout = sys.stdout\n with open('jira-time-report-console.txt', 'w') as sys.stdout:\n jiratimereport.process_work_logs(\"console\", issues, work_logs)\n sys.stdout = stdout\n self.assertTrue(filecmp.cmp('console_output.txt', 'jira-time-report-console.txt'))\n\n jiratimereport.process_work_logs(\"csv\", issues, work_logs)\n self.assertTrue(filecmp.cmp('csv_output.csv', 'jira-time-report.csv'))\n\n jiratimereport.process_work_logs(\"excel\", issues, work_logs)\n expected_excel = pd.read_excel('excel_output.xlsx')\n actual_excel = pd.read_excel('jira-time-report.xlsx')\n self.assertTrue(expected_excel.equals(actual_excel))",
"def generate_xlsx_report(self, workbook, data, parts_data):\n worksheet = workbook.add_worksheet(\"daily_parts_issuance_wizard\")\n worksheet.set_column(0, 0, 10)\n worksheet.set_column(1, 1, 15)\n worksheet.set_column(2, 2, 20)\n worksheet.set_column(3, 3, 15)\n worksheet.set_column(4, 4, 10)\n worksheet.set_column(5, 5, 12)\n worksheet.set_column(6, 6, 10)\n worksheet.set_column(7, 7, 10)\n worksheet.set_column(8, 8, 15)\n worksheet.set_column(9, 9, 10)\n worksheet.set_column(10, 10, 15)\n worksheet.set_column(11, 11, 10)\n worksheet.set_column(12, 12, 20)\n worksheet.set_column(13, 13, 5)\n worksheet.set_column(14, 14, 5)\n worksheet.set_column(15, 15, 5)\n\n bold = workbook.add_format(\n {\"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n tot = workbook.add_format(\n {\"border\": 2, \"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n border = workbook.add_format(\n {\"border\": 2, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n merge_format = workbook.add_format({\"border\": 2, \"align\": \"center\"})\n format1 = workbook.add_format(\n {\"border\": 2, \"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n format1.set_bg_color(\"gray\")\n date = workbook.add_format({\"num_format\": \"dd/mm/yy\"})\n\n worksheet.merge_range(\"C3:F3\", \"Merged Cells\", merge_format)\n\n row = 0\n row += 1\n row += 1\n worksheet.write(row, 2, \"DAILY PARTS ISSUANCE\", tot)\n row += 1\n worksheet.write(row, 2, \"Date From:\", tot)\n worksheet.write(row, 3, data[\"form\"][\"date_from\"] or \"\", border)\n worksheet.write(row, 4, \"To:\", tot)\n worksheet.write(row, 5, data[\"form\"][\"date_to\"] or \"\", border)\n row += 2\n worksheet.write(row, 0, \"CMF\", bold)\n row = 3\n\n for objec in self.get_work_order_detail(data[\"form\"]):\n row += 3\n worksheet.write(row, 0, \"DATE ISSUED :\", bold)\n worksheet.write(row, 1, objec.get(\"date\") or \"\", date)\n row += 2\n worksheet.write(row, 0, \"NO.\", format1)\n worksheet.write(row, 1, \"WO NO.\", format1)\n worksheet.write(row, 2, \"VEHICLE ID\", format1)\n worksheet.write(row, 3, \"PART NO.\", format1)\n worksheet.write(row, 4, \"PART NAME\", format1)\n worksheet.write(row, 5, \"VEHICLE MAKE\", format1)\n worksheet.write(row, 6, \"USED\", format1)\n worksheet.write(row, 7, \"UNIT TYPE\", format1)\n worksheet.write(row, 8, \"OLD PART RETURND\", format1)\n worksheet.write(row, 9, \"ISSUED BY\", format1)\n worksheet.write(row, 10, \"REMARKS\", format1)\n line_row = row + 1\n line_col = 0\n counter = 1\n for obj in objec.get(\"value\"):\n worksheet.write(line_row, line_col, counter, border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"wo_name\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"vehicle_id\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"part_no\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"part_name\") or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.get(\"vehicle_make\") or \"\", border\n )\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"qty\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"uom\") or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.get(\"old_part_return\") or \"\", border\n )\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"issued_by\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"remarks\") 
or \"\", border)\n line_col = 0\n line_row += 1\n counter += 1\n worksheet.write(line_row, line_col, \"********\", border)",
"def create_template(path_string) :\r\n today = datetime.now()\r\n today = today.strftime('%y%y%m%d%H%M%S')\r\n # print(today)\r\n temp_path = os.path.join(path_string, today)\r\n # temp_path = today\r\n # Create a workbook and add a worksheet.\r\n workbook = xlsxwriter.Workbook(f'{temp_path}.xlsx')\r\n worksheet0 = workbook.add_worksheet('ATR') # Defaults to Sheet1.\r\n worksheet1 = workbook.add_worksheet('ESS') # Data.\r\n worksheet2 = workbook.add_worksheet('Statistics') # Defaults to Sheet\r\n\r\n # Some data we want to write to the worksheet.\r\n Tests_List = ['Temp', 'SN', 'Output Power @ P1dBCP', 'Output Power Control Range/Resolution, FWD PWR Ind',\r\n 'Output IP3', 'LO Carrier Leakage', 'Sideband Suppression',\r\n 'Frequency Accuracy and Stability', 'A1 - Noise Figure vs. Gain', 'A1 - Gain variability',\r\n 'A1 - Image Suppression vs. Gain', 'Spurious',\r\n 'A2 - Noise Figure vs. Gain', 'A2 - Gain variability', 'A2 - Image Suppression vs. Gain',\r\n 'Average Power Consumption', 'Input Voltage', 'Digital Tests'\r\n ]\r\n\r\n # Start from the first cell. Rows and columns are zero indexed.\r\n row = 0\r\n # col = 0\r\n\r\n # Iterate over the data and write it out row by row.\r\n for index in range(3) :\r\n for i in range(len(Tests_List)) :\r\n worksheet0.write(row, i, Tests_List[i])\r\n worksheet1.write(row, i, Tests_List[i])\r\n worksheet2.write(row, i, Tests_List[i])\r\n # col += 1\r\n\r\n workbook.close()\r\n\r\n return today, temp_path",
"def generate_test_data(self):\n self.message('Generating {} rows of unique keyed test data.'.format(self.test_data_row_count))\n if not self.open_workbooks():\n exit()\n\n # populate our data dump input files\n self.populate_sheet(self.wb_incident, self.wb_incident.active, self.fn_incident, 'Hypercare Incidents', 'INC')\n self.populate_sheet(self.wb_enhancement, self.wb_enhancement.active, self.fn_enhancement,\n 'Hypercare Enhancements', 'ENH')\n self.populate_sheet(self.wb_defect, self.wb_defect.active, self.fn_defect, 'Hypercare Defects', 'DFC')\n self.populate_sheet(self.wb_alm, self.wb_alm.active, self.fn_alm, 'ALM Defects', 'ALM')\n\n self.message('Completed generating input file')",
"def write_results_to_excel_file(spec, runs, output, process_id, path_name):\n\n wb = Workbook()\n if os.path.isfile(path_name):\n wb = load_workbook(path_name)\n\n sheet_result_title = \"P_\" + str(spec['num_of_producers'])\n\n if sheet_result_title not in wb.sheetnames:\n wb.create_sheet(sheet_result_title)\n\n sheet_result = initiate_worksheet(wb, sheet_result_title)\n ind_col = 1\n ind_row = 1\n while sheet_result.cell(row=ind_row, column=ind_col).value is not None:\n ind_row += 1\n # parameters\n for col_exc in spec:\n sheet_result.cell(row=ind_row, column=ind_col).value = spec[col_exc]\n ind_col += 1\n # runs\n sheet_result.cell(row=ind_row, column=ind_col).value = runs\n ind_col += 1\n # output\n for col_out in output:\n sheet_result.cell(row=ind_row, column=ind_col).value = output[col_out]\n ind_col += 1\n # processID\n sheet_result.cell(row=ind_row, column=ind_col).value = process_id\n ind_col += 1\n\n wb.save(path_name)",
"def create_xlsx(birthday_counts,filename='data'):\n with open(f'{filename}.csv',mode='w') as f:\n f.write('Name,Birthday,Year,Mobile No.')\n df = pd.read_csv(f'{filename}.csv')\n for row in range(0,birthday_counts):\n print('Enter Data To Create \".xlsx\" File')\n df.loc[row,'Name'] = input(f'Enter Name Of {row+1} Column: \\n')\n df.loc[row,'Birthday'] = input(f'Enter Birthday Of {row+1} Column In DD/MM Formant : \\n')\n df.loc[row,'Year'] = int(input(f'Enter Last Time Wished Year Of {row+1} Column In YYYY Format: \\n'))\n df.loc[row,'Mobile No.'] = int(input(f'Enter Whtasapp Mobile No. Of {row+1} Column With Country Code Without \"+\" : \\n'))\n df.to_excel(f'{filename}.xlsx',index=False)\n os.remove(f'{filename}.csv')",
"def excel_print(data1, data2, data3, data4, data5, data6):\r\n\r\n list_data = [data1, data2, data3, data4, data5, data6]\r\n name_list = ['Old elec', 'New elec', 'Old elec dup', 'New elec dup',\r\n 'Diff After Strip', 'New Elec Before Strip']\r\n zipped = zip(list_data, name_list)\r\n excel_writer = pd.ExcelWriter('elec_delta2.xlsx', engine='xlsxwriter')\r\n for data, name in zipped:\r\n data.to_excel(excel_writer, sheet_name=name,\r\n index=False, freeze_panes=(1, 0))\r\n num_cols = len(list(data))\r\n worksheet = excel_writer.sheets[name]\r\n worksheet.autofilter(0, 0, 0, num_cols-1)\r\n worksheet.set_column(0, 0, 23.56)\r\n worksheet.set_column(1, 1, 34.89)\r\n excel_writer.save()",
"def test_csv_simple_input(self):\n\n # Mix of integer and string data. Ensure that commas and\n # quotes are escaped properly.\n data = [\n {\n 'name': 'Normal string',\n 'item_num': 1,\n },\n {\n 'name': 'String, with, commas',\n 'item_num': 2,\n },\n {\n 'name': 'String with \" quote',\n 'item_num': 3,\n },\n ]\n\n table = TableReportForTesting(data)\n response = table.as_csv(HttpRequest())\n self.assertEqual(response.status_code, 200)\n # Expect cells containing commas to be escaped with quotes.\n content = response.content\n if PY3:\n content = content.decode(settings.DEFAULT_CHARSET).replace('\\x00', '')\n self.assertEqual(\n content,\n 'Name,Item Num\\r\\n'\n 'Normal string,1\\r\\n'\n '\"String, with, commas\",2\\r\\n'\n '\"String with \"\" quote\",3\\r\\n')",
"def outputExcelReport(self):\n # ++++++++++\n # init\n # ++++++++++\n wb = openpyxl.Workbook()\n wb.fonts = openpyxl.styles.Font(\n name = 'Courier New',\n size = 12\n )\n # create and delete sheets\n _ = wb.create_sheet(title='Cover',index=0)\n _ = wb.create_sheet(title='Results',index=1)\n _ = wb.create_sheet(title='AllItems',index=2)\n _ = wb.remove(wb.worksheets[-1])\n # ++++++++++\n # Sheet 1 <Cover>\n # ++++++++++\n ws = wb['Cover']\n # --- title and date\n timeNow = datetime.datetime.now().isoformat().split('T')[0]\n ws.merge_cells('A1:B1')\n ws.merge_cells('A3:B3')\n ws['A1'] = '納入チェック ダイアグ確認結果'\n ws['A3'] = '作成日:{}'.format(timeNow)\n # --- sample info\n ws['A5'] = '<サンプル情報>'\n self._write2excel(ws, self._sample_info, 6, 1)\n for r in range(6,8):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- checker info\n ws['A9'] = '<チェッカ情報>'\n self._write2excel(ws, self._checker_info, 10, 1)\n for r in range(10,13):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- dmm info\n ws['A14'] = '<DMM情報>'\n self._write2excel(ws, self._dmm_info, 15, 1)\n for r in range(15,18):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- resistor info\n ws['A19'] = '<抵抗器情報>'\n self._write2excel(ws, self._resistor_info, 20, 1)\n for r in range(20,23):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n ws[cell.coordinate].font = STYLE_FONT_PASS\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 2 <Results>\n # ++++++++++\n ws = wb['Results']\n # --- output all scenario\n ws['A1'] = '<結果一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._result_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==6:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n ws.cell(cell.row,6).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 3 <AllItems>\n # ++++++++++\n ws = wb['AllItems']\n # --- output all scenario\n ws['A1'] = '<出力一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._scenario_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n 
ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==5:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # save book\n # ++++++++++\n wb.save(self._filename)",
"def test_read_excel_big(test_mp, tmp_path):\n tmp_path /= \"output.xlsx\"\n\n # Write a 25-element parameter with max_row=10 → split across 3 sheets\n scen = ixmp.Scenario(test_mp, **models[\"dantzig\"], version=\"new\")\n add_random_model_data(scen, 25)\n scen.to_excel(tmp_path, items=ixmp.ItemType.MODEL, max_row=10)\n\n # Initialize target scenario for reading\n scen_empty = ixmp.Scenario(test_mp, \"foo\", \"bar\", version=\"new\")\n scen_empty.init_set(\"random_set\")\n scen_empty.init_par(\n \"random_par\", scen.idx_sets(\"random_par\"), scen.idx_names(\"random_par\")\n )\n\n # File can be read\n scen_empty.read_excel(tmp_path)\n\n assert len(scen_empty.par(\"random_par\")) == 25",
"def test_add_csv_data_00(self, mocker):\n fake_fields = self.fake.pylist(10, True, str)\n fake_data = []\n for _ in range(self.fake.random_digit()):\n fake_entry = {}\n for field in fake_fields:\n fake_entry[field] = self.fake.word()\n fake_data.append(fake_entry)\n\n g = GSheets(self.fake.file_path(depth=1, category=None, extension='json'), [])\n g.spreadsheet = Spreadsheet(None, None)\n g.worksheet = mocker.MagicMock()\n g.worksheet.append_row = mocker.MagicMock()\n g.add_csv_data(fake_fields, fake_data)\n\n assert not g.worksheet.append_row.call_count == len(fake_data)",
"def main():\n try:\n opts,args = getopt.getopt(sys.argv[1:], \"tl:s:o:h\", [\"title\", \"lines=\", \"sep=\", \"output=\", \"help\"])\n except getopt.GetoptError:\n usage()\n if (len(args) != 1):\n usage()\n inputFileName = args[0]\n try:\n inputFile = open(inputFileName, 'r')\n except IOError:\n print \"File not found:\", inputFileName, \"...aborting\"\n sys.exit(-1)\n titlePresent, linesPerFile, sepChar, outputFileName = validateOpts(opts)\n if (outputFileName == \"\"):\n outputFileName = getDefaultOutputFileName(inputFileName)\n workbook, worksheet = openExcelSheet(outputFileName)\n fno = 0\n lno = 0\n titleCols = []\n reader = csv.reader(inputFile, delimiter=sepChar)\n for line in reader:\n if (lno == 0 and titlePresent):\n if (len(titleCols) == 0):\n titleCols = line\n writeExcelHeader(worksheet, titleCols)\n else:\n writeExcelRow(worksheet, lno, line)\n lno = lno + 1\n if (linesPerFile != -1 and lno >= linesPerFile):\n closeExcelSheet(workbook, outputFileName)\n renameOutputFile(outputFileName, fno)\n fno = fno + 1\n lno = 0\n workbook, worksheet = openExcelSheet(outputFileName)\n inputFile.close()\n closeExcelSheet(workbook, outputFileName)\n if (fno > 0):\n renameOutputFile(outputFileName, fno)",
"def test_export(self):\n add_constituency_result_line('X, 10, C')\n r = self.client.get('/export/results')\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.data.decode('utf8').strip(), 'X, 10, C')",
"def test_export_data_trivial(self) -> None:\n user_data = (\n user_models.UserSubscriptionsModel.export_data(self.USER_ID_1))\n test_data: Dict[str, Union[List[str], None]] = {\n 'creator_usernames': [],\n 'collection_ids': [],\n 'exploration_ids': [],\n 'general_feedback_thread_ids': [],\n 'last_checked_msec': None\n }\n self.assertEqual(user_data, test_data)",
"def test_outputs(self, monkeypatch, script_runner):\n monkeypatch.setattr(\"builtins.input\", lambda _: \"n\")\n _ = script_runner.run(\n \"spectrafit\",\n \"spectrafit/test/test_data.txt\",\n \"-i\",\n \"spectrafit/test/test_input_2.json\",\n )\n assert len(list(Path(\".\").glob(\"*.json\"))) == 1\n assert len(list(Path(\".\").glob(\"*.csv\"))) == 3",
"def test_workflow_no_enriched_data(mock_env_home, set_workflow_config, input_path, output_path):\n # Create source and destination configurations\n source = set_workflow_config[1]\n destination = set_workflow_config[2]\n source[\"input_path\"] = input_path\n destination[\"output_path\"] = output_path\n\n # Create new workflow with source and destination configurations\n test_workflow = spy(TestWorkflowImpl(\n source=source, destination=destination, name=\"test-workflow-no-data\", custom_workflow_param=\"test_param\"\n ))\n io_writer = spy(test_workflow._io_writer)\n\n # Return empty dataframe when workflow runs\n when(test_workflow).workflow(...).thenReturn(DataFrame())\n\n # Verify io_writer does not write data\n verify(io_writer, times=0).write_data(...)\n\n # Verify that no output file created.\n assert os.path.exists(output_path) == False",
"def create_xlsx(request):\n\n date_dict = income_date_parser(request)\n\n income_history = get_incomes_funds_ids(user_id=date_dict['user_id'],\n date_start=date_dict['start_date'],\n date_end=date_dict['finish_date'],\n time_diff=date_dict['utc_difference'])\n del income_history[-1]\n\n output, worksheet, workbook, formats_dict = creating_empty_xlsx_file()\n\n if income_history:\n head_row, head_col = 1, 1\n row, col = 2, 1\n for i in income_history[0]:\n if i != 'income_history_id':\n worksheet.write(head_row, head_col, i, formats_dict['head_format'])\n head_col += 1\n\n for history_dict in income_history:\n worksheet.write(row, col, history_dict['income'], formats_dict['cell_format'])\n worksheet.write(row, col + 1, history_dict['fund'], formats_dict['cell_format'])\n date = datetime.datetime.strptime(history_dict['date'], \"%Y-%m-%d\")\n worksheet.write_datetime(row, col + 2, date, formats_dict['date_format'])\n worksheet.write_number(row, col + 3, history_dict['amount'],\n formats_dict['value_format'])\n worksheet.write(row, col + 4, history_dict['comment'], formats_dict['cell_format'])\n col, row = 1, row + 1\n\n workbook.close()\n\n response = file_streaming_response \\\n ('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n 'income_history.xlsx', output)\n return response",
"def create_output_file(self):\r\n self.output_file = openpyxl.Workbook()",
"def test_main_incorrect_type():\n with pytest.raises(Exception) as e_info:\n main([\"./excelAddinGenerator\", \"./src/data/xl/styles.xml\", \"fail.xlam\"])"
]
| [
"0.780116",
"0.69337434",
"0.6777988",
"0.6512617",
"0.640232",
"0.6325789",
"0.6285732",
"0.6262961",
"0.62007165",
"0.6108699",
"0.6036565",
"0.6028895",
"0.60118926",
"0.5993342",
"0.59584403",
"0.5911351",
"0.5878128",
"0.5867191",
"0.5837545",
"0.58317214",
"0.58210737",
"0.58023113",
"0.57659554",
"0.5764451",
"0.57422876",
"0.57380927",
"0.57303745",
"0.57256573",
"0.5723793",
"0.5714649"
]
| 0.72343 | 1 |
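Aside: the record above exercises an Excel export path (django-tables2-reports' treatement_to_response) but never shows what a minimal spreadsheet write looks like on its own. The sketch below is an illustration only — it is not part of the dataset, it uses openpyxl rather than the xls backend probed by the test, and the file name, sheet title, and sample rows are assumptions made up for the example.

# Hedged sketch: turn a list of row dicts into an .xlsx file with openpyxl.
# All names, paths, and sample data here are illustrative, not from the record above.
from openpyxl import Workbook

def write_rows_to_xlsx(rows, path="test-file.xlsx"):
    """Write a list of dicts to one worksheet, one column per key."""
    wb = Workbook()
    ws = wb.active
    ws.title = "report"
    if rows:
        headers = list(rows[0].keys())
        ws.append(headers)                            # header row
        for row in rows:
            ws.append([row.get(h) for h in headers])  # data row in header order
    wb.save(path)
    return path

if __name__ == "__main__":
    sample = [
        {"name": "Normal string", "item_num": 1},
        {"name": "String, with, commas", "item_num": 2},
    ]
    print(write_rows_to_xlsx(sample))
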
Takes a list of vertices and corresponding weights and returns the list of vertices resulting from the weighted average | def averageCurve(vertices, weights,a):
weightedCurve = [[PVector.mult(j,i[0]) for j in i[1]]
for i in zip(weights,vertices)]
average = [averageVertices(i,weights,a) for i in zip(*weightedCurve)]
return average | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _weightedAverage(list_):\n\n\t\taccum = [0, 0]\n\n\t\tfor point, weight in list_:\n\n\t\t\taccum[0] += point[0] * weight\n\t\t\taccum[1] += point[1] * weight\n\n\t\ttotalWeight = sum([weight for point, weight in list_])\n\n\n\t\tif totalWeight == 0:\n\t\t\t\n\t\t\treturn (0, 0)\n\n\n\t\taccum[0] /= float(totalWeight)\n\t\taccum[1] /= float(totalWeight)\n\n\t\treturn (accum[0], accum[1])",
"def weighted_average(items, weights):\n assert len(items) > 0\n assert len(items) == len(weights)\n # declare total as the return value which is a decimal\n total = 0.0\n # for all pairs from two lists\n for i in range(len(items)):\n \t# we increment the total for the product of both value\n \ttotal += items[i] * weights[i]\n # we return the total divided by sum of weights\n return total / sum(weights)",
"def weighted_average(items, weights):\r\n \r\n assert len(items) > 0\r\n assert len(items) == len(weights)\r\n\r\n a = items #Assign the items to a variable\r\n b = weights #Assign the weights to a variable\r\n x = list(items) #Transform the items to a list\r\n y = list(weights) #Transform the weights to a list\r\n sum1 = sum(weights) #Sum up all of the weights\r\n z = [a*b for a,b in zip(x, y)] #Multiply both lists by matching up the elements from both lists\r\n sum2 = sum(list(z)) #Take the sum of all of the products\r\n \r\n return float(sum2/sum1) #Divide the sum of the products by the sum of the weights to get the weighted average\r",
"def weighted_average(listofvalues):\n total = 0\n weights = 0\n for [w, v] in listofvalues:\n total += w*v\n weights += w\n return total/weights",
"def vector_weighted_average(vf, weights):\n weights_sum = weights.sum()\n y_average = (vf[:,:,0] * weights).sum() / weights_sum\n x_average = (vf[:,:,1] * weights).sum() / weights_sum\n return np.array([y_average, x_average])",
"def weighted_average(array, weights):\n assert len(array) == len(weights)\n return sum([x * w for x, w in zip(array, weights)]) / sum(weights)",
"def weighted_average(value_weight_list): \n numerator = sum([v * w for v,w in value_weight_list])\n denominator = sum([w for v,w in value_weight_list])\n if(denominator != 0):\n return(float(numerator) / float(denominator))\n else:\n return None",
"def weighted_average(value_weight_list):\n numerator = sum([v * w for v, w in value_weight_list])\n denominator = sum([w for v, w in value_weight_list])\n if(denominator != 0):\n return(float(numerator) / float(denominator))\n else:\n return None",
"def weightedMean(numlist, weights):\n\twxsum = 0.0\n\twsum = 0.0\n\n\tassert len(numlist) == len(weights)\n\n\tfor (x,w) in zip(numlist, weights):\n\t\twxsum += x*w\n\t\twsum += w\n\tif wsum == 0.0:\n\t\treturn 0.0\n\treturn wxsum/wsum",
"def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg",
"def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg",
"def average_weights(w):\n w_avg = copy.deepcopy(w[0])\n for key in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[key] += w[i][key]\n w_avg[key] = torch.div(w_avg[key], len(w))\n return w_avg",
"def weighted_avg(x, weights):\n return weights.unsqueeze(1).bmm(x).squeeze(1)",
"def average_to_vertices(self, dofs):\n data_qp, integral = self.interp_to_qp(dofs)\n vertex_dofs = self.average_qp_to_vertices(data_qp, integral)\n\n return vertex_dofs",
"def weightedMean(points, weights):\r\n\t\tweightedSum = sum([a*b for a,b in zip(points, weights)])\r\n\t\ttotalWeight = sum(weights)\r\n\t\treturn weightedSum / totalWeight",
"def variateOneWeight(weights):\n sts = []\n for i in range(len(weights)):\n st = [x[0] for x in weights[:i]]\n for w in weights[i][1:]:\n subs = []\n subs += st\n subs.append(w)\n for w2 in weights[i+1:]:\n subs.append(w2[0])\n sts.append((w.name, subs))\n return sts",
"def add_vertices(self, vertices: Iterable[\"Vertex\"]) -> Sequence[int]:\n indices = []\n precision = self.precision\n for vertex in vertices:\n vertex = Vec3(vertex)\n key = vertex.round(precision) # type: ignore\n try:\n index, count = self.ledger[key]\n except KeyError: # new key\n index = len(self.vertices)\n self.vertices.append(vertex)\n self.ledger[key] = (index, 1)\n else: # update key entry\n # calculate new average location\n average = (self.vertices[index] * count) + vertex\n count += 1\n # update vertex location\n self.vertices[index] = average / count\n # update ledger\n self.ledger[key] = (index, count)\n indices.append(index)\n return tuple(indices)",
"def weights_to_probs(G,edge_weights):\n new_edge_weights = []\n for v in G.vertices():\n outgoing_edges = [e for e in G.edges() if e[0] == v]\n outgoing_edge_weights = [(e,math.exp(w)) for e,w in edge_weights if e in outgoing_edges]\n S = sum([x[1] for x in outgoing_edge_weights])\n new_edge_weights.extend([(e,w/S) for e,w in outgoing_edge_weights])\n return new_edge_weights",
"def weightedAverage(requestContext, seriesListAvg, seriesListWeight, node):\n\n sortedSeries={}\n\n for seriesAvg, seriesWeight in izip(seriesListAvg , seriesListWeight):\n key = seriesAvg.name.split(\".\")[node]\n if key not in sortedSeries:\n sortedSeries[key]={}\n\n sortedSeries[key]['avg']=seriesAvg\n key = seriesWeight.name.split(\".\")[node]\n if key not in sortedSeries:\n sortedSeries[key]={}\n sortedSeries[key]['weight']=seriesWeight\n\n productList = []\n\n for key in sortedSeries.keys():\n if 'weight' not in sortedSeries[key]:\n continue\n if 'avg' not in sortedSeries[key]:\n continue\n\n seriesWeight = sortedSeries[key]['weight']\n seriesAvg = sortedSeries[key]['avg']\n\n productValues = [ safeMul(val1, val2) for val1,val2 in izip(seriesAvg,seriesWeight) ]\n name='product(%s,%s)' % (seriesWeight.name, seriesAvg.name)\n productSeries = TimeSeries(name,seriesAvg.start,seriesAvg.end,seriesAvg.step,productValues)\n productSeries.pathExpression=name\n productList.append(productSeries)\n\n sumProducts=sumSeries(requestContext, productList)[0]\n sumWeights=sumSeries(requestContext, seriesListWeight)[0]\n\n resultValues = [ safeDiv(val1, val2) for val1,val2 in izip(sumProducts,sumWeights) ]\n name = \"weightedAverage(%s, %s)\" % (','.join(set(s.pathExpression for s in seriesListAvg)) ,','.join(set(s.pathExpression for s in seriesListWeight)))\n resultSeries = TimeSeries(name,sumProducts.start,sumProducts.end,sumProducts.step,resultValues)\n resultSeries.pathExpression = name\n return resultSeries",
"def calculate_average(precisions, weights):\n tmp_res = 1\n for id, item in enumerate(precisions):\n tmp_res = tmp_res*np.power(item, weights[id])\n tmp_res = np.power(tmp_res, np.sum(weights))\n return tmp_res",
"def calculate_average(precisions, weights):\r\n tmp_res = 1\r\n for id, item in enumerate(precisions):\r\n tmp_res = tmp_res*np.power(item, weights[id])\r\n tmp_res = np.power(tmp_res, np.sum(weights))\r\n return tmp_res",
"def edge_weights(G, weight='weight'):\n for _, nbrdict in G.adjacency_iter():\n for edgedata in nbrdict.itervalues():\n yield edgedata[weight]",
"def weights(self) -> List[float]:",
"def weighted_avg(y_hats, weights=[]):\n df_y_hats = pd.DataFrame(y_hats).T\n if not weights:\n return df_y_hats.mean(axis=1).values\n\n return df_y_hats.apply(\n lambda row: np.dot(row.values, weights), axis=1).values / sum(weights)",
"def weighted_avg_and_std(values, weights):\n values = np.array(values)\n values = values[np.isfinite(values)]\n average = np.nanmean(values)\n # Fast and numerically precise:\n std = np.nanstd(values)\n return [np.round(average,3), np.round(std,3)]",
"def get_edges_weighted(self):\n edges = []\n for v in self.vertices.values():\n for w in v.neighbors:\n edges.append((v.name, w.name, v.neighbors[w]))\n return edges",
"def weighted_statistics(values: Sized, weights: Sized) -> List[float]:\n average = numpy.average(values, weights=weights, axis=0)[0]\n # Fast and numerically precise:\n variance = numpy.average((values - average) ** 2, weights=weights, axis=0)\n sd = math.sqrt(variance)\n error = 1.96 * sd / math.sqrt(len(values))\n return [average, math.sqrt(variance), len(values), error]",
"def mean(data, weights, **kws):\n return np.average(data, weights=weights, **kws)",
"def weighted_average(x: torch.Tensor, weights: Optional[torch.Tensor]=None, dim=None) ->torch.Tensor:\n if weights is not None:\n weighted_tensor = torch.where(weights != 0, x * weights, torch.zeros_like(x))\n sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)\n return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights\n else:\n return x.mean(dim=dim)",
"def average_edge(pollster_edges, pollster_errors):\n # in order to make use of weighted_average() function, we need to create 2 lists\n pollsters = []\n weights = []\n # traverse all pollster edges\n for key in pollster_edges:\n \t# we update two lists with same order\n \tpollsters.append(pollster_edges[key])\n \tweights.append(pollster_to_weight(key, pollster_errors))\n # simply return the result of weighted_average() function\n return weighted_average(pollsters, weights)"
]
| [
"0.76076883",
"0.7525749",
"0.73337907",
"0.7230022",
"0.71708345",
"0.7100033",
"0.70964843",
"0.6970508",
"0.68913996",
"0.68118274",
"0.68118274",
"0.68118274",
"0.6765599",
"0.65661603",
"0.64742553",
"0.6403966",
"0.6368752",
"0.63204116",
"0.62720555",
"0.6237824",
"0.61972433",
"0.61911607",
"0.61547697",
"0.6133005",
"0.61278373",
"0.6086671",
"0.6059343",
"0.6005781",
"0.5947449",
"0.5921999"
]
| 0.81328255 | 0 |
Loads the knowledge base into memory | def load_knowledge_base():
knowledge_base = {}
with open('knowledge_base.json') as f:
knowledge_base = json.load(f)
return knowledge_base | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_knowledge(self):\n MemoryManager.load_memory(self.knowledge_file)",
"def load_knowledge(net, filepath):\n\treloaded = loadz(filepath)\n\tknowledge = [(name, reloaded[name]) for name in sorted(reloaded.keys())]\n\tset_knowledge(net, knowledge)",
"def load(self):",
"def load(self):\n pass",
"def load(self):\n pass",
"def load(self):\n pass",
"def load(self):\n pass",
"def load(self):\n\n raise NotImplementedError",
"def load_data(self) -> None:",
"def load(self):\n raise NotImplementedError",
"def load(self):\n raise NotImplementedError",
"def load_data(self):",
"def load_all(self):\n if os.path.isfile(self.vocab_path):\n self.vocab_processor = self.load_vocab()\n else:\n self.vocab_processor = self.train_vocab()\n if self.data_path:\n self.x, self.y = self.load_data(self.need_shuffle)\n print(\"Max document length: {}\".format(self.max_doc))",
"def load(self):\n self._really_load()",
"def _load(self):\n raise NotImplementedError()",
"def load(self):\n raise NotImplementedError()",
"def load(self):\n raise NotImplementedError()",
"def load_words(): \r\n return lw.load_words()",
"def load(self):\n return",
"def load_kb(self):\n tf.logging.info('loading and indexing kb...')\n start = time.time()\n self.kb = sling.Store()\n self.kb.load(FLAGS.sling_kb_file)\n self.kb.freeze()\n tf.logging.info('loading took %.3f sec' % (time.time() - start))\n # these are used a lot\n self.instance_of = self.kb[INSTANCE_OF_ID]\n self.category = self.kb[CATEGORY_ID]\n # just in case\n self.english_cats = self.type_freq = None\n # space for kb construction\n self.collected_edges = collections.defaultdict(set)\n self.collected_names = {}\n self.collected_cat_mems = {}",
"def load(self):\r\n checkpoint = torch.load(self.checkpoint_path,\r\n map_location=self.device)\r\n self.load_state_dict(checkpoint)\r\n del checkpoint",
"def store(self):\n print(\"Please take a break, this will take a while :).\")\n\n wiki_db = GenericLookup(\n \"entity_word_embedding\",\n os.path.join(self.base_url, self.wiki_version, \"generated\"),\n table_name=\"wiki\",\n columns={\"p_e_m\": \"blob\", \"lower\": \"text\", \"freq\": \"INTEGER\"},\n )\n\n wiki_db.load_wiki(self.p_e_m, self.mention_freq, batch_size=50000, reset=True)",
"def load(self):\n self._load()",
"def load_data(self):\n raise NotImplementedError()",
"def load_data():\n dictionary = corpora.Dictionary.load(app.config['DICTIONARY'])\n matrix = similarities.MatrixSimilarity.load(app.config['MATRIX'])\n model = models.LsiModel.load(app.config['MODEL'])\n df = pd.read_pickle(app.config['DATA_FRAME'])\n return Data(matrix=matrix, model=model, dictionary=dictionary, data_frame=df)",
"def load_data(data_path):\n print(\"RNN Language MODEL: Loading gigaword corpus\")\n return data.CorpusGigaword(data_path)",
"def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()",
"def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()",
"def load_corpus_abstracts():\r\n\t\r\n\tglobal abstracts_dict\r\n\tif os.path.exists(paths.path_data_abstracts_pickle):\r\n\t\tprint('\\nloading abstracts')\r\n\t\tabstracts_dict = pickle.load(open(paths.path_data_abstracts_pickle,\"rb\"))\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False",
"def load(self):\n checkpoint = torch.load(self.checkpoint_path,\n map_location=self.net.device)\n self.load_state_dict(checkpoint)\n del checkpoint"
]
| [
"0.81079787",
"0.671406",
"0.6534463",
"0.6513961",
"0.6513961",
"0.6513961",
"0.6513961",
"0.6447636",
"0.6335379",
"0.63297707",
"0.63297707",
"0.63241786",
"0.62627333",
"0.62624913",
"0.61553985",
"0.61483",
"0.61483",
"0.613253",
"0.610389",
"0.60957575",
"0.5902059",
"0.5890207",
"0.58687013",
"0.58655894",
"0.5863515",
"0.5826935",
"0.58094585",
"0.57927746",
"0.5791585",
"0.5780442"
]
| 0.696881 | 1 |
Get recording's size in seconds | def get_recording_size(file_name):
recording_size = check_output(
["mp3info", "-p", "%m:%s\n", "{}".format(file_name)]).decode("utf-8")
print("Recording size:", str(recording_size))
minutes_seconds = (int(recording_size.split(":")[0]) * 60)
seconds = int(recording_size.split(":")[1].replace("\n", ""))
recording_seconds_size = minutes_seconds + seconds
print("Recording seconds size:", str(recording_seconds_size))
return recording_seconds_size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def duration(self):\n return self.sound.nframes",
"def record_duration(self):\n return self.config.get('record_duration', 5)",
"def get_frame_duration(self):\n return self._frame_duration",
"def get_record_count(self):\n return os.path.getsize(self.path) / self._get_record_size()",
"def duration(self):\n with audioread.audio_open(self.path) as f:\n return f.duration",
"def duration_in_seconds(self):\n \"Should not set track length\"\n return self.duration / float(self.samplerate)",
"def duration(self):\n duration = 0\n for wf in self._waveforms:\n duration += wf.duration\n return duration",
"def frame_duration(self):\n return self.samples_per_frame / self.input_data_sample_rate",
"def get_duration(self):\n duration_ns = self.stream.InitialTimeToWaitGet()\n duration_ns += self.stream.NumberOfFramesGet() * self.stream.InterFrameGapGet()\n return datetime.timedelta(seconds=duration_ns / 1e9)",
"def media_duration(self):\n return self._table.active_track_total_time.total_seconds()",
"def get_duration(self) -> int:\n return int( (self._frame_count / self._fps) * 1000 )",
"def size(self):\n if self.frames is None:\n return 0\n return self.frames.size",
"def duration():\r\n elapsed_time, duration = video_time()\r\n return duration",
"def duration(self):\n window_length = self.window_length\n if self.window_length is None:\n warnings.warn(\n \"spectrogram must have window_length attribute to\"\n \" accurately calculate duration. Approximating duration.\"\n )\n return self.times[-1]\n else:\n return self.times[-1] + window_length / 2",
"def ms(self):\n # my clock uses seconds internally\n return 1000 * self.read()",
"def duration(self) -> float:\n return float(len(self.__samples))/float(self.__rate)",
"def getDuration(self):\n #return np.sum(self.subintinfo['TSUBINT']) #This is constant.\n return np.sum(self.getSubintinfo('TSUBINT')) #This is constant.",
"def record_audio(self, time):\n p = pyaudio.PyAudio()\n stream = p.open(format=self.format,\n channels=self.channels,\n rate=self.rate,\n input=True,\n frames_per_buffer=self.chunk)\n\n print(\"* recording\")\n\n frames = []\n for i in range(0, int(self.rate / self.chunk * time)):\n data = stream.read(self.chunk)\n frames.append(data)\n\n print(\"* done recording\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n return p.get_sample_size(self.format), b''.join(frames)",
"def time_length(self):\n return self._time_length",
"def get_duration(self):\n frame_dur = self.get_frame_duration()\n num_frames = self.get_num_frames()\n motion_dur = frame_dur * (num_frames - 1)\n return motion_dur",
"def get_recording_length(file_path):\n f = open(file_path, 'rb')\n header = f.read(256)\n f.close()\n \n return int(header[236:244].decode('ascii'))",
"def to_length_secs(self):\n return (self.bpm / 60.0) / self.period",
"def get_length(self):\r\n check_mixer()\r\n frequency, format, channels = (ffi.new('int*'), ffi.new('uint16_t*'),\r\n ffi.new('int*'))\r\n sdl.Mix_QuerySpec(frequency, format, channels)\r\n if format == sdl.AUDIO_S8 or format == sdl.AUDIO_U8:\r\n mixerbytes = 1.0\r\n else:\r\n mixerbytes = 2.0\r\n numsamples = self.chunk.alen / mixerbytes / channels[0]\r\n return numsamples / frequency[0]",
"def bspb_frameCounter():\n curTime = int(pm.currentTime())\n maxTime = int(pm.playbackOptions(q=True, maxTime=True))\n return '{0} / {1}'.format(curTime, maxTime)",
"def fft_size(self):\n import supriya.ugens\n\n return supriya.ugens.BufFrames.ir(self.buffer_id)",
"def track_duration(self):\n return self._track_duration",
"def duration(self):\n pass",
"def duration(self):\n pass",
"def _get_duration(self):\n try:\n dur = self.im.info[\"duration\"] / 1000.0\n except KeyError:\n dur = DEFAULT_DURATION / 1000.0 \n\n return dur",
"def get_pixel_size_rec(rec, verbose=False):\n len_rec_x_pixel = 64\n len_rec_x_um = 71.5 / rec['wParamsNum'][30]\n \n rec_pixel_size = len_rec_x_um / len_rec_x_pixel\n \n if verbose:\n print(\"the real length of each pixel in this recording is: \\n{0} um\".format(rec_pixel_size))\n \n return rec_pixel_size"
]
| [
"0.7078902",
"0.70021707",
"0.68017805",
"0.67804694",
"0.6737474",
"0.6670569",
"0.6666141",
"0.6623355",
"0.6576982",
"0.65693134",
"0.6568904",
"0.6546339",
"0.65320414",
"0.65001947",
"0.64856166",
"0.64756674",
"0.6467947",
"0.6441833",
"0.6434663",
"0.6421729",
"0.63782686",
"0.63735914",
"0.63560474",
"0.6345435",
"0.6326475",
"0.63201344",
"0.6315167",
"0.6315167",
"0.63113487",
"0.63104534"
]
| 0.7635485 | 0 |
Test that webhook returns 500 for unknown action | def test_webhook_unkown_action(self):
event = {
"body": json.dumps({
"queryResult": {
"action": "1manage_bmi"
}})
}
context = {}
resp = webhook(event, context)
self.assertEqual(resp["statusCode"], 500)
self.assertEqual(resp["body"], json.dumps({})) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_unsupported_action(self):\r\n self.xmodule.verify_oauth_body_sign = Mock()\r\n request = Request(self.environ)\r\n request.body = self.get_request_body({'action': 'wrongAction'})\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'unsupported',\r\n 'description': 'Target does not support the requested operation.',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)",
"def test_webhook_empty_event(self):\n event = {\n 'body': json.dumps({})\n }\n context = {}\n resp = webhook(event, context)\n self.assertEqual(resp[\"statusCode\"], 500)\n self.assertEqual(resp[\"body\"], json.dumps({}))",
"def test_unsupported_action(self):\n self.xmodule.verify_oauth_body_sign = Mock()\n request = Request(self.environ)\n request.body = self.get_request_body({'action': 'wrongAction'})\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'unsupported',\n 'description': 'Target does not support the requested operation.',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)",
"def test_exceptions_give_500(self):\n\n self.assertEqual(\n self._request(self._make_dummy_notification([DEVICE_RAISE_EXCEPTION])), 500\n )\n\n # we also check that a successful device doesn't hide the exception\n self.assertEqual(\n self._request(\n self._make_dummy_notification([DEVICE_ACCEPTED, DEVICE_RAISE_EXCEPTION])\n ),\n 500,\n )\n\n self.assertEqual(\n self._request(\n self._make_dummy_notification([DEVICE_RAISE_EXCEPTION, DEVICE_ACCEPTED])\n ),\n 500,\n )",
"def test_not_accepted(self):\n response = {\"status_code\": 403}\n self.mock_response.configure_mock(**response)\n\n post_to_ext_app(\"fake_url\", \"fake_data\", \"fake_headers\")\n\n self.mock_post.assert_called_once_with(\"fake_url\", data=\"fake_data\", headers=\"fake_headers\")\n self.assertEqual(self.mock_send_mail.call_count, 1)",
"def test_invalid_webhook(self, mock_send):\n logging.disable(logging.CRITICAL) # Don't log to stderr during this unit test\n mock_send.side_effect = OSError(\"Some error\")\n send_notification(\"invalid_webhook\", self.message)\n mock_send.assert_called()\n logging.disable(logging.NOTSET) # Reset the logging",
"def webhook_event(self, event):\n\n return HttpResponse(\n content=f'Unhandled webhook obtained: {event[\"type\"]}',\n status=200)",
"def test_bad_action(self):\n req = self.req(\"post\", \"/the/url\", data={\"action-bad\": \"3\"})\n\n res = self.view(req)\n\n self.assertEqual(self.mock_model._base_manager.get.call_count, 0)\n self.assertEqual(res.status_code, 302)",
"def test_POST_fetcher_fail():\n bad_url = POST_ECHO_ENDPOINT.replace('.com', '.comx')\n\n with pytest.raises(Exception): #TODO: specific exception?\n resp = wf_utils.fetch_GET_request(bad_url)\n\n #TODO: bad status code tests?",
"def test_incorrect_token_post(self): \n request = self.build_request(token=\"incorrect_token\")\n response = self.app.post('/_ah/push-handlers/receive_message',json.dumps(request).encode('utf-8'),content_type=\"application/json\")\n self.assertEqual(response.status_int, 200)\n self.assertRaises(ValueError)",
"def test_custom_403(self):\n c = Client()\n response = c.get(\"/apimock/mocked/mocked_post?format=json\")\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n \"wrong used test Data,this is api for POST\", response.content)",
"def test_patch_actor_assistant_permissions_500(self): # assistant cannot patch actors\r\n res = self.client().patch('/actors/1', json=partial_actor, headers=casting_assistant)\r\n data = json.loads(res.data)\r\n\r\n self.assertEqual(res.status_code, 500)\r\n self.assertFalse(data[\"success\"])\r\n self.assertEqual(data[\"message\"], \"internal server error\")",
"def test_post_error_status_code(self):\n response = self.app.post('/mq_message', data={})\n assert response.status_code == 400",
"def test_bad_signature(fail_on_mismatch, settings, rf):\n app_key = '123appkey'\n setattr(settings, APP_KEY_SETTING, app_key)\n setattr(settings, FAIL_ON_MISMATCH_SETTING, fail_on_mismatch)\n view = OurVeryOwnReceiverView.as_view()\n request = rf.post(\n WEBHOOK_URL,\n GOOD_EVENT_LIST_JSON,\n content_type='application/json')\n\n response = view(request)\n if fail_on_mismatch:\n assert response.status_code == 400\n assert response.content == ErrorMessages.INVALID_SIGNATURE\n else:\n assert response.status_code == 200",
"def test_patch_movie_assistant_permissions_500(self): # assistant cannot patch movies\r\n res = self.client().patch('/movies/1', json=partial_movie, headers=casting_assistant)\r\n data = json.loads(res.data)\r\n\r\n self.assertEqual(res.status_code, 500)\r\n self.assertFalse(data[\"success\"])\r\n self.assertEqual(data[\"message\"], \"internal server error\")",
"def test_bad_request(self):\n self._error_test(fitbit_exceptions.HTTPBadRequest)",
"def test_500_internal_server_error(self):\n # create route to abort the request with the 500 Error\n @self.app.route('/500')\n def internal_server_error():\n abort(500)\n response = self.client.get('/500')\n self.assertEqual(response.status_code, 500)",
"def test_escalate_post_error(client):\n g.test_authorized_for = []\n res = client.post(\"/v0/escalate\", json={\"fingerprint\": \"splunk\"})\n assert \"500 INTERNAL SERVER ERROR\" in res.status",
"def test_405_response(self):\n mock = Mock()\n mock.status_code = 500\n\n with self.assertRaises(APIError):\n check_response(mock)",
"def test_acknowledge_hmac_validation_failed(client):\n res = client.get(\n \"/v0/acknowledge?fp=splunk_82998ef6bb3db9dff3dsfdsfsdc\" \"&t=97244b15a21f45e002b2e913866ff7545510f9b08dea5241f\"\n )\n assert res.status == \"500 INTERNAL SERVER ERROR\"",
"def test_escalate_error_post(client):\n g.test_authorized_for = []\n res = client.post(\"/v0/escalate\", json={\"fingerprint\": \"splunk_4025ad30<script>\"})\n assert \"500 INTERNAL SERVER ERROR\" in res.status",
"def test_500_internal_server_error(app, client):\n\n @app.route(\"/500\")\n def internal_server_error():\n abort(500)\n\n response = client.get(\"/500\")\n assert response.status_code == 500\n assert \"500 Internal Server Error\" in str(response.data)",
"def test_empty_request_returns_an_error(self, patch_process_event_with_plugins):\n\n # Empty GET\n response = self.client.get(\"/e/?data=\", content_type=\"application/json\", HTTP_ORIGIN=\"https://localhost\",)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(patch_process_event_with_plugins.call_count, 0)\n\n # Empty POST\n response = self.client.post(\"/e/\", {}, content_type=\"application/json\", HTTP_ORIGIN=\"https://localhost\",)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(patch_process_event_with_plugins.call_count, 0)",
"def bad_callback(_request, _uri, headers):\n return (404, headers, 'NOT AN ASSERTION')",
"def test_500_if_request_is_not_formed_poperly(self):\n res = self.client().post('/quizzes', json={\"previous_question\": []})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], \"unprocessable.\")",
"def test_webhook_bad_signature(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % self.status_update.pk,\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n })\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(\n rsp.content,\n b'Invalid Travis CI webhook signature for status update %d.'\n % self.status_update.pk)",
"def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)",
"def test_with_internal_server_error(self, make_request):\n action_params = {\"filter\": \"internalServerError\"}\n with self.assertRaises(PluginException):\n self.action.run(action_params)",
"def test_webhook_bad_status_update(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % (self.status_update.pk + 1),\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n })\n self.spy_on(TravisCIWebHookView._validate_signature,\n owner=TravisCIWebHookView,\n call_fake=lambda self, request, integration_config: True)\n\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(\n rsp.content,\n b'Unable to find matching status update ID %d.'\n % (self.status_update.pk + 1))",
"def test_slackWH_send_good(get_slackwebhook, capsys):\n s = get_slackwebhook\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out"
]
| [
"0.69867796",
"0.698422",
"0.6960273",
"0.67963976",
"0.6699416",
"0.6666364",
"0.6647646",
"0.6636956",
"0.6598994",
"0.6533085",
"0.65311563",
"0.653109",
"0.651041",
"0.64883715",
"0.6477502",
"0.64637",
"0.6421816",
"0.6410432",
"0.6400015",
"0.639822",
"0.6357702",
"0.635198",
"0.6322129",
"0.6296868",
"0.6295559",
"0.6293048",
"0.62873423",
"0.6282082",
"0.62792474",
"0.62680745"
]
| 0.8470133 | 0 |
Test that webhook returns 500 for empty body/action in event | def test_webhook_empty_event(self):
event = {
'body': json.dumps({})
}
context = {}
resp = webhook(event, context)
self.assertEqual(resp["statusCode"], 500)
self.assertEqual(resp["body"], json.dumps({})) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_webhook_unkown_action(self):\n event = {\n \"body\": json.dumps({\n \"queryResult\": {\n \"action\": \"1manage_bmi\"\n }})\n }\n context = {}\n resp = webhook(event, context)\n self.assertEqual(resp[\"statusCode\"], 500)\n self.assertEqual(resp[\"body\"], json.dumps({}))",
"def test_empty_request_returns_an_error(self, patch_process_event_with_plugins):\n\n # Empty GET\n response = self.client.get(\"/e/?data=\", content_type=\"application/json\", HTTP_ORIGIN=\"https://localhost\",)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(patch_process_event_with_plugins.call_count, 0)\n\n # Empty POST\n response = self.client.post(\"/e/\", {}, content_type=\"application/json\", HTTP_ORIGIN=\"https://localhost\",)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(patch_process_event_with_plugins.call_count, 0)",
"def test_webhook_no_env(self):\n payload = json.dumps({})\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 400)\n self.assertEqual(rsp.content, b'Got event without an env in config.')",
"def test_post_empty_data(self):\n response = self.app.post('/_ah/push-handlers/receive_message')\n self.assertEqual(response.status_int, 200)\n self.assertEqual(response.body, \"No request body received\")\n self.assertRaises(ValueError)",
"def webhook_event(self, event):\n\n return HttpResponse(\n content=f'Unhandled webhook obtained: {event[\"type\"]}',\n status=200)",
"def handle_event(self, event):\n return HttpResponse(\n content=f'Unhandled webhook received: {event[\"type\"]}',\n status=200)",
"async def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"",
"async def test_mailgun_webhook_with_missing_signature(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count",
"def test_no_headers_sent(self):\n\n def app(environ, start_response):\n yield \"Hello world\"\n\n event = {\n \"httpMethod\": \"POST\",\n \"path\": \"/\",\n \"queryStringParameters\": None,\n \"headers\": {\n \"Host\": \"localhost\",\n },\n \"body\": None\n }\n context = DummyContext()\n\n with self.assertRaisesRegexp(Exception, \"Headers must be sent before body\"):\n Handler(app)(event, context)",
"async def test_mailgun_webhook_event_without_an_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"",
"async def test_api_fire_event_with_invalid_json(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\"\"\"\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test_event_bad_data\", listener)\n\n resp = await mock_api_client.post(\n \"/api/events/test_event_bad_data\", data=json.dumps(\"not an object\")\n )\n\n await hass.async_block_till_done()\n\n assert resp.status == HTTPStatus.BAD_REQUEST\n assert len(test_value) == 0\n\n # Try now with valid but unusable JSON\n resp = await mock_api_client.post(\n \"/api/events/test_event_bad_data\", data=json.dumps([1, 2, 3])\n )\n\n await hass.async_block_till_done()\n\n assert resp.status == HTTPStatus.BAD_REQUEST\n assert len(test_value) == 0",
"def test_incorrect_token_post(self): \n request = self.build_request(token=\"incorrect_token\")\n response = self.app.post('/_ah/push-handlers/receive_message',json.dumps(request).encode('utf-8'),content_type=\"application/json\")\n self.assertEqual(response.status_int, 200)\n self.assertRaises(ValueError)",
"def test_invalid_webhook(self, mock_send):\n logging.disable(logging.CRITICAL) # Don't log to stderr during this unit test\n mock_send.side_effect = OSError(\"Some error\")\n send_notification(\"invalid_webhook\", self.message)\n mock_send.assert_called()\n logging.disable(logging.NOTSET) # Reset the logging",
"def test_no_data():\n response = test_app.post(\"/bkt_service/unwind\", expect_errors=True)\n assert response.status == '400 Bad Request'\n assert \"No data\" in response.text",
"def test_no_action(self):\n self.request.log(\"Hello World\")\n self.request.end()\n entry = self.get_entry()\n assert entry['action'] is None",
"def test_exception_handler_on_web_request(self):\n lh = LambdaHandler(\"tests.test_exception_handler_settings\")\n\n event = {\n \"body\": \"\",\n \"resource\": \"/{proxy+}\",\n \"requestContext\": {},\n \"queryStringParameters\": {},\n \"headers\": {\n \"Host\": \"1234567890.execute-api.us-east-1.amazonaws.com\",\n },\n \"pathParameters\": {\"proxy\": \"return/request/url\"},\n \"httpMethod\": \"GET\",\n \"stageVariables\": {},\n \"path\": \"/return/request/url\",\n }\n\n mocked_exception_handler.assert_not_called()\n response = lh.handler(event, None)\n\n self.assertEqual(response[\"statusCode\"], 500)\n mocked_exception_handler.assert_called()",
"def test_exception_in_bot_triggered_event(self):\n lh = LambdaHandler(\"tests.test_bot_exception_handler_settings\")\n # from : https://docs.aws.amazon.com/lambda/latest/dg/eventsources.html#eventsources-lex\n event = {\n \"messageVersion\": \"1.0\",\n \"invocationSource\": \"DialogCodeHook\",\n \"userId\": \"user-id specified in the POST request to Amazon Lex.\",\n \"sessionAttributes\": {\n \"key1\": \"value1\",\n \"key2\": \"value2\",\n },\n \"bot\": {\"name\": \"bot-name\", \"alias\": \"bot-alias\", \"version\": \"bot-version\"},\n \"outputDialogMode\": \"Text or Voice, based on ContentType request header in runtime API request\",\n \"currentIntent\": {\n \"name\": \"intent-name\",\n \"slots\": {\n \"slot-name\": \"value\",\n \"slot-name\": \"value\",\n \"slot-name\": \"value\",\n },\n \"confirmationStatus\": \"None, Confirmed, or Denied (intent confirmation, if configured)\",\n },\n }\n\n response = lh.lambda_handler(event, None)\n mocked_exception_handler.assert_called",
"def testNonJSONPayload(self):\n body = 'Invalid JSON'\n headers = Headers({'Content-Length': [str(len(body))],\n 'Content-Type': ['application/json']})\n request = FakeRequest(headers=headers, body=body)\n resource = TestResource(None, None)\n result = yield resource.deferred_render_POST(request)\n response = loads(result)\n self.assertEqual(JSONRPC_PARSE_ERROR, response['error']['code'])\n message = 'Payload was not valid JSON.'\n self.assertEqual(message, response['error']['message'])\n self.assertIn(message, self.log.getvalue())\n self.assertIn('Request payload: Invalid JSON.', self.log.getvalue())",
"def test_accepted_with_no_message(self):\n response = {\"status_code\": 202, \"content\": \"\"}\n self.mock_response.configure_mock(**response)\n\n post_to_ext_app(\"fake_url\", \"fake_data\", \"fake_headers\")\n\n self.mock_post.assert_called_once_with(\"fake_url\", data=\"fake_data\", headers=\"fake_headers\")\n self.assertFalse(self.mock_send_mail.called)",
"def test_send_event_raises():\n send_event('pytest-reportportal', '5.0.5')",
"def test_create_empty_payload(self):\n response = self.client.post('/exercises/', data={})\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_command_trigger_webhook_post(self):\n pass",
"async def test_webhook_endpoint_unauthorized_update_doesnt_generate_telegram_text_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n unauthorized_update_message_text,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_text\")\n\n response = await client.post(\n TELEGRAM_WEBHOOK_URL, json=unauthorized_update_message_text\n )\n assert response.status == 200\n assert (await response.read()).decode(\"utf-8\") == \"\"\n\n # Make sure any events would have fired\n await hass.async_block_till_done()\n\n assert len(events) == 0",
"def test_nonexisting_event(self):\n response = self.client.get(\"/events/1\")\n self.assertEqual(response.status_code, 404)",
"def test_uptimerobot_invalid_payload_with_missing_data(self) -> None:\n self.url = self.build_webhook_url()\n payload = self.get_body(\"uptimerobot_invalid_payload_with_missing_data\")\n result = self.client_post(self.url, payload, content_type=\"application/json\")\n self.assert_json_error(result, \"Invalid payload\")\n\n expected_message = MISCONFIGURED_PAYLOAD_ERROR_MESSAGE.format(\n bot_name=self.test_user.full_name,\n support_email=FromAddress.SUPPORT,\n ).strip()\n\n msg = self.get_last_message()\n self.assertEqual(msg.content, expected_message)\n self.assertEqual(msg.recipient.type, Recipient.PERSONAL)",
"def test_execute_post_exception_invalid_filter():\n message = FakeMessage()\n message.raw_payload = json.dumps(\n TestData.JOB_TEMPLATE_POST_BAD_FILTERED_PAYLOAD_GZIPPED\n )\n headers = {\"Content-Type\": \"application/json\"}\n with aioresponses() as mocked:\n mocked.post(\n TestData.JOB_TEMPLATE_POST_URL,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATE_POST_RESPONSE),\n headers=headers,\n )\n with pytest.raises(Exception):\n worker.execute(message, TestData.RECEPTOR_CONFIG, queue.Queue())",
"def test_post_error_status_code(self):\n response = self.app.post('/mq_message', data={})\n assert response.status_code == 400",
"def test_uncaught_wsgi_exception(self):\n\n def app(environ, start_response):\n raise Exception(\"Oops\")\n\n event = {\n \"httpMethod\": \"POST\",\n \"path\": \"/\",\n \"queryStringParameters\": {\n \"x\": \"y\"\n },\n \"headers\": {\n \"Host\": \"localhost\",\n \"Content-Type\": \"text/plain\",\n \"Content-Length\": \"2\"\n },\n \"body\": \"Hi\"\n }\n context = DummyContext()\n\n with self.assertRaisesRegexp(Exception, \"Oops\"):\n result = Handler(app)(event, context)\n\n # TODO: Test exc_info is logged somewhere",
"def test_missing_body(self):\n self.is_authenticated()\n response = self.post_without_body()\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_500_if_request_is_not_formed_poperly(self):\n res = self.client().post('/quizzes', json={\"previous_question\": []})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], \"unprocessable.\")"
]
| [
"0.82119334",
"0.7412875",
"0.7366841",
"0.72384053",
"0.70663595",
"0.67772496",
"0.6771868",
"0.6729244",
"0.66838586",
"0.6642581",
"0.66096735",
"0.6549085",
"0.65390784",
"0.64993393",
"0.64882827",
"0.64425206",
"0.64246047",
"0.6402282",
"0.63719505",
"0.6359598",
"0.6335726",
"0.6306914",
"0.6292355",
"0.6292038",
"0.62457913",
"0.62385786",
"0.62229025",
"0.6182634",
"0.61751413",
"0.6172435"
]
| 0.87026906 | 0 |
Calculates surface area of a square | def square_surface_area(a):
return (a*a) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())",
"def surface_area(self) -> float:\n return 4 * np.pi * self.radius**2",
"def surfaceArea(self):\n surfaceArea = self.sideLength**2 * 6\n return surfaceArea",
"def rectangle_surface_area(a,b):\n return (a*b)",
"def surface_area_of_cube(side):\n return side",
"def surface_area_cube(side_length: float) -> float:\r\n if side_length < 0:\r\n raise ValueError(\"surface_area_cube() only accepts non-negative values\")\r\n return 6 * side_length**2",
"def square_area(side):\n return side**2",
"def getSurfaceArea(self) -> float:\n return self.area()",
"def circle_surface_area(a):\n return (a*a*math.pi)",
"def area(poly):\n if len(poly) < 3: # not a plane - no area\n return 0\n total = [0, 0, 0]\n num = len(poly)\n for i in range(num):\n vi1 = poly[i]\n vi2 = poly[(i+1) % num]\n prod = np.cross(vi1, vi2)\n total[0] += prod[0]\n total[1] += prod[1]\n total[2] += prod[2]\n result = np.dot(total, unit_normal(poly[0], poly[1], poly[2]))\n return abs(result/2)",
"def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r",
"def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])",
"def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])",
"def surface_area(DEM, resolution):\n\n resolution_squared = resolution ** 2.\n cross_distance_squared = 2.0 * (resolution ** 2.)\n\n m1 = ((DEM[0:-1, 0:-1] - DEM[0:-1, 1:]) ** 2.0 + resolution_squared) ** 0.5\n m2 = ((DEM[0:-1, 0:-1] - DEM[1:, 0:-1]) ** 2.0 + resolution_squared) ** 0.5\n m3 = ((DEM[0:-1, 0:-1] - DEM[1:, 1:]) ** 2.0 + cross_distance_squared) ** 0.5\n m4 = ((DEM[0:-1, 1:] - DEM[1:, 1:]) ** 2.0 + resolution_squared) ** 0.5\n m5 = ((DEM[1:, 0:-1] - DEM[1:, 1:]) ** 2.0 + resolution_squared) ** 0.5\n\n #from pdb import set_trace; set_trace()\n # Heron's formula for computing the area of a triangle, knowing 3 sides lengths,\n # requires a semiperimeter variable \"s\"\n s1 = 0.5 * (m3 + m5 + m2)\n s2 = 0.5 * (m3 + m4 + m1)\n\n # Calculate area using Heron's formula. This computes the upper and lower triangle area for each set of 4 dem points\n area = np.sum(np.sqrt(s1 * (s1 - m3) * (s1 - m5) * (s1 - m2))) + np.sum(np.sqrt(s2 * (s2 - m3) * (s2 - m4) * (s2 - m1)))\n\n return area",
"def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)",
"def area_equilat(side):\n\treturn side/2 * math.sqrt(side**2 - (side/2)**2)",
"def surface_area(self):\n return self._surface_area",
"def area(self) -> torch.Tensor:\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area",
"def findArea(self):\n\n a, b = self.sides\n area = a * b\n print(f\"Are of rectangle is: {area}\")",
"def area(symbol):\n return (symbol.bounding_box.vertices[2].x - symbol.bounding_box.vertices[0].x) * (\n symbol.bounding_box.vertices[2].y - symbol.bounding_box[0].y)",
"def area(self) -> npt.NDArray[np.float_]:\n points = self._normalized_projection()\n a = sum(det(points[..., [0, i, i + 1], :]) for i in range(1, points.shape[-2] - 1))\n return 1 / 2 * np.abs(a)",
"def compute_area(boxes):\n area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n return area",
"def area(self):\n area = 0\n last = self._coordinates[-1]\n for c in self._coordinates:\n area += (last[0] * c[1] - last[1] * c[0])\n last = c\n return float(\"{:.2f}\".format(abs(area) * 0.5))",
"def area(x, y):\n return x*y/2",
"def area(\n self):\n pi = numpy.pi\n area0 = 4.0 * pi / 8.0\n areadiv = 4.0 ** self.depth\n area = area0 / areadiv * (180.0 / pi) ** 2\n return area",
"def area(self):\n return 0.5*np.abs(np.dot(self.x,np.roll(self.y,1))-np.dot(self.y,np.roll(self.x,1)))",
"def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area",
"def sphere_area(radius : number) -> number:\n area = 4*pi*radius*radius\n return area",
"def test_square(self):\n result = shape_area.square_area(5)\n self.assertEqual(result,25)",
"def box_area(box):\n x1, y1, x2, y2 = box\n w = x2 - x1\n h = y2 - y1\n return float(w) * h"
]
| [
"0.7919954",
"0.76347554",
"0.76003134",
"0.73753244",
"0.7210805",
"0.70903355",
"0.7076613",
"0.7038129",
"0.7034476",
"0.6993899",
"0.69891864",
"0.6846133",
"0.6832713",
"0.6830783",
"0.68109065",
"0.6806593",
"0.68017524",
"0.6777848",
"0.6749091",
"0.67174566",
"0.6696241",
"0.6642536",
"0.6640111",
"0.66396505",
"0.6633332",
"0.66023237",
"0.65681624",
"0.6564185",
"0.65602964",
"0.65587556"
]
| 0.8115568 | 0 |
Calculates the total circumference of a rectangle | def rectangle_circumference(a,b):
return (2*(a+b)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def circumference(self):\n return self.width + self.height",
"def circumference(self):\n raise NotImplementedError",
"def circumference(self):\n return math.pi * self.radius * 2",
"def circumference(self):\n return (2 * math.pi * self.__radius)",
"def calculateperimeter(self):\r\n return (self.width * 2) + (self.height * 2)",
"def _arc_radius(height_in_units):\n return height_in_units * _ARC_HEIGHT_UNIT / (1 - math.cos(_ANGLE))",
"def cylinder_area(radius: number, height: number) -> number:\n area = 2*pi*radius*(radius+height)\n return area",
"def circle_circumference(a):\n return (2*a*math.pi)",
"def circleArea(radius):\n return math.pi * radius * radius",
"def area_circle(r):\n return (r ** 2) * math.pi",
"def perimeter(self):\n\t\treturn 2 * (self.width + self.height)",
"def circum(radius, places):\n return 2 * pi * radius",
"def perimeter(self):\n return self.sidelength1 + self.sidelength2 + self.baselength1 + self.baselength2",
"def test_circumference_area(self):\n self.assertEqual(9.425, circumference_area(self.values['radius']))",
"def cone_area(radius: number, height: number) -> number:\n return pi*radius*(radius + sqrt(radius**2 + height**2))",
"def circle_area(self):\n return np.pi * self.ring_radius ** 2",
"def area_of_circle(radius):\n return radius",
"def findArea(self):\n\n a, b = self.sides\n area = a * b\n print(f\"Are of rectangle is: {area}\")",
"def perimeter(self):\r\n return (2*self.width) + (2*self.height)",
"def perimeter(cnt):\n\treturn cv2.arcLength(cnt, True)",
"def areaOfQuadrilateral(rect):\n rect = np.array(rect)\n A = rect[..., 0, :]\n B = rect[..., 1, :]\n C = rect[..., 2, :]\n D = rect[..., 3, :]\n return 0.5 * np.abs((A[..., 1] - C[..., 1]) * (D[..., 0] - B[..., 0]) + (B[..., 1] - D[..., 1]) * (A[..., 0] - C[..., 0]))",
"def perimeter(self):\n return 2 * (self.height + self.width)",
"def getCircleCircumscribed(self):\n p1, p2, p3 = self.points\n a1 = - (p2.x - p1.x) / (p2.y - p1.y)\n b1 = (p2.x ** 2 - p1.x ** 2 + p2.y ** 2 - p1.y ** 2) / (2 * (p2.y - p1.y))\n a2 = - (p3.x - p2.x) / (p3.y - p2.y)\n b2 = (p3.x ** 2 - p2.x ** 2 + p3.y ** 2 - p2.y ** 2) / (2 * (p3.y - p2.y))\n x = (b1 - b2) / (a2 - a1)\n y = a1 * x + b1\n radius = math.hypot(p1.x - x, p1.y - y)\n return Circle(x, y, radius=radius)",
"def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius",
"def perimeter(self):\n return sum([s.length for s in self.segments])",
"def circle_area(radius):\n return math.pi * radius ** 2",
"def angle_calc(sides):\n return 360//sides",
"def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - self.box[0]) / 2",
"def circleArea(radius):\n radius = float(radius)\n return math.pi*(radius**2)",
"def perimeter(self):\n\t\treturn self.height * 4"
]
| [
"0.738668",
"0.7031609",
"0.69710547",
"0.6853785",
"0.6589618",
"0.65277773",
"0.6506341",
"0.6492389",
"0.64720625",
"0.644245",
"0.643771",
"0.64178556",
"0.6397216",
"0.6363049",
"0.63360244",
"0.6303689",
"0.62996864",
"0.6289533",
"0.6289518",
"0.6276146",
"0.6274405",
"0.627184",
"0.62347054",
"0.6219327",
"0.61740977",
"0.61634094",
"0.616251",
"0.61507386",
"0.61499953",
"0.6137922"
]
| 0.74838096 | 0 |
Calculates surface area of a rectangle | def rectangle_surface_area(a,b):
return (a*b) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())",
"def rect_area(rect):\n return rect[2] * rect[3]",
"def _area(bounds):\n return (bounds[0, 1] - bounds[0, 0]) * (bounds[1, 1] - bounds[1, 0])",
"def area_rect(w, h):\n return w * h",
"def surfaceArea(self):\n surfaceArea = self.sideLength**2 * 6\n return surfaceArea",
"def findArea(self):\n\n a, b = self.sides\n area = a * b\n print(f\"Are of rectangle is: {area}\")",
"def areaOfQuadrilateral(rect):\n rect = np.array(rect)\n A = rect[..., 0, :]\n B = rect[..., 1, :]\n C = rect[..., 2, :]\n D = rect[..., 3, :]\n return 0.5 * np.abs((A[..., 1] - C[..., 1]) * (D[..., 0] - B[..., 0]) + (B[..., 1] - D[..., 1]) * (A[..., 0] - C[..., 0]))",
"def getSurfaceArea(self) -> float:\n return self.area()",
"def surface_area(self) -> float:\n return 4 * np.pi * self.radius**2",
"def rectangle_area(side1, side2):\n return float(side1) * float(side2)",
"def rectangle_area(coordinates):\n return (coordinates[2] - coordinates[0]) * (coordinates[3] - coordinates[1])",
"def area(symbol):\n return (symbol.bounding_box.vertices[2].x - symbol.bounding_box.vertices[0].x) * (\n symbol.bounding_box.vertices[2].y - symbol.bounding_box[0].y)",
"def rectangle_area(width : number, height : number) ->number:\n area = width*height\n #print(\"The area of rectangle is =\", area, \"sq. units\")\n return area",
"def square_surface_area(a):\n return (a*a)",
"def rectangle_area(base, height):\n return (base * height)",
"def box_area(box):\n x1, y1, x2, y2 = box\n w = x2 - x1\n h = y2 - y1\n return float(w) * h",
"def surface_area(self):\n return self._surface_area",
"def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r",
"def surface_area_of_cube(side):\n return side",
"def area_of(self, left_top, right_bottom):\n hw = np.clip(right_bottom - left_top, 0.0, None)\n return hw[..., 0] * hw[..., 1]",
"def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])",
"def area(self) -> torch.Tensor:\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area",
"def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])",
"def area(poly):\n if len(poly) < 3: # not a plane - no area\n return 0\n total = [0, 0, 0]\n num = len(poly)\n for i in range(num):\n vi1 = poly[i]\n vi2 = poly[(i+1) % num]\n prod = np.cross(vi1, vi2)\n total[0] += prod[0]\n total[1] += prod[1]\n total[2] += prod[2]\n result = np.dot(total, unit_normal(poly[0], poly[1], poly[2]))\n return abs(result/2)",
"def area(width, height):\n return width * height",
"def areaRect(length, width):\n return length * width",
"def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)",
"def rectArea(base, height):\n return base * height",
"def area_of(left_top, right_bottom):\n hw = np.clip(right_bottom - left_top, 0.0, None)\n return hw[..., 0] * hw[..., 1]",
"def area_of(left_top, right_bottom):\n hw = np.clip(right_bottom - left_top, 0.0, None)\n return hw[..., 0] * hw[..., 1]"
]
| [
"0.77162355",
"0.75253814",
"0.7490359",
"0.74618113",
"0.7423974",
"0.7329641",
"0.72515327",
"0.725146",
"0.7233866",
"0.721391",
"0.72130924",
"0.7122993",
"0.7114237",
"0.7041266",
"0.7039922",
"0.7016561",
"0.6981435",
"0.69292253",
"0.692703",
"0.6900223",
"0.6870387",
"0.6855837",
"0.6844328",
"0.6837437",
"0.6829881",
"0.6812645",
"0.6808407",
"0.6802661",
"0.6793347",
"0.6793347"
]
| 0.8167863 | 0 |
Calculates surface area of a circle | def circle_surface_area(a):
return (a*a*math.pi) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def surface_area(self) -> float:\n return 4 * np.pi * self.radius**2",
"def circle_area(circle):\n return pi * circle.radius * circle.radius",
"def circleArea(radius):\n return math.pi * radius * radius",
"def circle_area(radius):\n return math.pi * radius ** 2",
"def area_of_circle(radius):\n return radius",
"def circle_area(self):\n return np.pi * self.ring_radius ** 2",
"def circle_area(radius):\n area = radius ** 2 * math.pi\n return area",
"def area_circle(r):\n return (r ** 2) * math.pi",
"def area(self):\n\t\t#print (self.radius*self.radius*math.pi)\n\t\tcircle_area = (self.radius*self.radius*math.pi)\n\t\treturn circle_area",
"def area_circle(radius):\n \n pi = 3.1459\n area = pi * radius * radius\n return area",
"def circleArea(radius):\n radius = float(radius)\n return math.pi*(radius**2)",
"def area_of_circle(r):\n a = r**2 * math.pi\n return a",
"def area_of_circle(r):\n a = r**2 * math.pi\n return a",
"def sphere_area(radius : number) -> number:\n area = 4*pi*radius*radius\n return area",
"def circle_area(radius : number) -> number:\n area = pi*radius*radius\n #print(\"The area of circle is =\", area, \"sq.units\")\n return area",
"def surface_area_cone(radius: float, height: float) -> float:\r\n if radius < 0 or height < 0:\r\n raise ValueError(\"surface_area_cone() only accepts non-negative values\")\r\n return pi * radius * (radius + (height**2 + radius**2) ** 0.5)",
"def sphereArea(radius):\n area = 4 * math.pi * radius ** 2\n return area",
"def area(self):\n return math.pi * self.radius ** 2",
"def area(self):\n return math.pi * self.radius ** 2",
"def area(self):\n return self.radius*self.radius*math.pi",
"def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())",
"def getArea(self):\n return math.pi * self.radius ** 2",
"def area(self):\r\n return math.pi*(self.__radius**2)",
"def area_circle(radius: float) -> float:\r\n if radius < 0:\r\n raise ValueError(\"area_circle() only accepts non-negative values\")\r\n return pi * radius**2",
"def compute_area(radius):\n radius = int(input(\"What is the radius of the circle? \\n> \"))\n \n while radius <=0:\n radius = int(input(\"Sorry, must give a number greater than 0. \\n> \"))\n \n area = (pi * pow(radius, 2))\n \n #t.circle(radius)\n \n return area",
"def area(self):\n return (self.__radius ** 2 * math.pi)",
"def getArea(self):\n return math.pi * self.__radius * self.__radius",
"def area(self):\n return math.pi * math.pow(self.radius, 2)",
"def surface_area_sphere(radius: float) -> float:\r\n if radius < 0:\r\n raise ValueError(\"surface_area_sphere() only accepts non-negative values\")\r\n return 4 * pi * radius**2",
"def circle_area(pop):\n\treturn math.pi * pop / (200.0 ** 2)"
]
| [
"0.8024205",
"0.78188527",
"0.7663585",
"0.7590993",
"0.7516806",
"0.7515295",
"0.74730927",
"0.747086",
"0.7412868",
"0.7388443",
"0.7359613",
"0.72836643",
"0.72836643",
"0.7241308",
"0.7222641",
"0.7126074",
"0.71226263",
"0.7098659",
"0.7098659",
"0.7093549",
"0.7084029",
"0.70773786",
"0.7055799",
"0.70231855",
"0.70182353",
"0.7010384",
"0.6985101",
"0.6981598",
"0.69192815",
"0.6914857"
]
| 0.8019704 | 1 |
If there are X company families currently running plan b, AND config throttles to Y company families, AND X is less than Y, then we should find Y - X company families to run. | def test_get_company_families_in_need_of_plan_b_positive(self):
self.mock.max_simultaneous_plan_bs = 5
self.mock.currently_running_companies = [[1, "run_id"], [2, "run_id"], [3, "run_id"]]
needs_plan_b_companies = [[4, None], [5, None], [6, None]]
expected_number_of_tasks_to_create = 4
expected_company_families_in_need_of_plan_b = [[4, None], [5, None]]
self.mock._get_companies_in_need_of_plan_b().AndReturn(needs_plan_b_companies)
self.mock._get_company_families_to_run(expected_number_of_tasks_to_create, needs_plan_b_companies).AndReturn(expected_company_families_in_need_of_plan_b)
self.mox.ReplayAll()
# run, baby!
CompanyAnalyticsPlanBRunner._get_company_families_in_need_of_plan_b(self.mock)
self.assertEqual(self.mock.company_families_in_need_of_plan_b, expected_company_families_in_need_of_plan_b)
self.assertEqual(self.mock.max_simultaneous_plan_bs_running, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_company_families_in_need_of_plan_b_negative(self):\n self.mock.max_simultaneous_plan_bs = 5\n self.mock.currently_running_companies = [[1, \"run_id1\"], [2, \"run_id2\"], [3, \"run_id3\"], [4, \"run_id4\"], [5, \"run_id5\"]]\n self.mock.company_families_in_need_of_plan_b = None\n\n self.mox.ReplayAll()\n\n # run, baby!\n CompanyAnalyticsPlanBRunner._get_company_families_in_need_of_plan_b(self.mock)\n\n self.assertIsNone(self.mock.company_families_in_need_of_plan_b)\n self.assertEqual(self.mock.max_simultaneous_plan_bs_running, True)",
"def get_available_companies_and_people(team):",
"def find_best_matching_url(self):\n\n if self.filter_kvks:\n overlap_kvk = self.company_df.index.intersection(set(self.filter_kvks))\n self.company_df = self.company_df.loc[overlap_kvk]\n\n # set flag for all kvk processed longer than older_time ago\n delta_time = self.current_time - self.company_df[DATETIME_KEY]\n mask = (delta_time >= self.older_time) | delta_time.isna()\n if not self.force_process and not self.rescan_missing_urls:\n self.company_df = self.company_df[mask.values]\n\n self.logger.info(\"Start finding best matching urls for proc {}\".format(self.i_proc))\n\n # count the number of none-processed queries (ie in which the processed flag == False\n # we have already imposed the max_entries option in the selection of the ranges\n self.logger.info(\"Counting all...\")\n if self.maximum_entries:\n max_queries = self.maximum_entries\n else:\n max_queries = self.company_df.index.size\n self.logger.info(\"Start processing {} queries\".format(max_queries))\n\n if self.progressbar and self.showbar:\n pbar = tqdm(total=max_queries, position=self.i_proc, file=sys.stdout)\n pbar.set_description(\"@{:2d}: \".format(self.i_proc))\n else:\n pbar = None\n\n start = time.time()\n # loop over all the companies kvk numbers\n cnt = 0\n for index, row in self.company_df.iterrows():\n\n # first check if we do not have to stop\n if self.maximum_entries is not None and cnt == self.maximum_entries:\n self.logger.info(\"Maximum entries reached\")\n break\n if os.path.exists(STOP_FILE):\n self.logger.info(\"Stop file found. Quit processing\")\n os.remove(STOP_FILE)\n break\n\n kvk_nummer = index\n company_name = get_string_name_from_df(NAME_KEY, row, index, self.company_df)\n self.logger.info(\"Processing {} ({})\".format(kvk_nummer, company_name))\n\n cnt += 1\n\n if self.search_urls:\n self.logger.info(\"Start a URL search for this company first\")\n\n # for this kvk, get the list of urls + the address info\n company_urls_df = self.website_df[self.website_df[KVK_KEY] == kvk_nummer].reset_index()\n company_addresses_df = self.address_df[self.address_df[KVK_KEY] == kvk_nummer]\n\n try:\n # match the url with the name of the company\n company_url_match = \\\n CompanyUrlMatch(company_record=row,\n kvk_nr=kvk_nummer,\n company_name=company_name,\n current_time=self.current_time,\n company_urls_df=company_urls_df,\n company_addresses_df=company_addresses_df,\n urls_df=self.url_df,\n imposed_urls=self.impose_url_for_kvk,\n distance_threshold=self.threshold_distance,\n string_match_threshold=self.threshold_string_match,\n i_proc=self.i_proc,\n store_html_to_cache=self.store_html_to_cache,\n max_cache_dir_size=self.max_cache_dir_size,\n internet_scraping=self.internet_scraping,\n force_ssl_check=self.force_ssl_check,\n older_time=self.older_time,\n timezone=self.timezone,\n exclude_extension=self.exclude_extension,\n filter_urls=self.filter_urls,\n force_process=self.force_process,\n rescan_missing_urls=self.rescan_missing_urls,\n logger=self.logger\n )\n\n self.logger.debug(\"Done with {}\".format(company_url_match.company_name))\n except pw.DatabaseError as err:\n self.logger.warning(f\"{err}\")\n self.logger.warning(\"skipping\")\n else:\n # succeeded the match. 
Now update the sql tables atomic\n self.update_sql_tables(kvk_nummer, company_url_match)\n\n if pbar:\n pbar.update()\n\n if pbar is not None:\n pbar.close()\n\n duration = time.time() - start\n self.logger.info(f\"Done processing in {duration} seconds\")\n # this is not faster than save per record\n # with Timer(\"Updating tables\") as _:\n # query = (Company.update(dict(url=Company.url, processed=Company.processed)))\n # query.execute()",
"def get_available_companies(team):",
"def ConstrRank():\n with open(path.join(MAIN_PATH, RANK)) as f:\n ranked_data = []\n for line in f:\n ranked_data.append(line.strip().split()[0]) \n\n threshold = 5000\n global rank_less\n global rank_more\n rank_less = ranked_data[:threshold]\n rank_more = ranked_data[threshold:]\n\n with open(path.join(MAIN_PATH, INST)) as f:\n for line in f:\n line = line.strip().split(\",\")\n exists.append(line[0:2])",
"def brute_force_cow_transport(cows,limit=10):\n \n trip_options = get_partitions(cows)\n best_option = None\n fewest_trips = len(cows)\n \n for option in trip_options:\n valid = True\n #check to see if individual trips in each option don't exceed weight limit\n for trip in option:\n weight = sum(list(map(lambda x : cows[x] , trip)))\n if weight>limit:\n valid = False\n break\n #compare to best option so far, and replace if fewer number of trips\n if valid:\n if len(option)<=fewest_trips:\n best_option = option\n fewest_trips = len(option)\n \n return best_option",
"def decideWorkers(availableWorkers,today,num=4,mopping=False):\n secondary = 0 # count for how many secondaries are working today\n\n availableWorkers.sort(key=lambda worker: worker.getCleanCount())\n for minion in availableWorkers[:num]: # take the first four lowest\n if minion.getWorkEnd() != 9.0: # find the number of secondaries\n secondary += 1\n\n for minion in availableWorkers[:num+secondary]: # take the first four lowest and additionals to replace secondaries\n if mopping and (minion.getWorkEnd() == 9.0): #only people who will be at the end of lab to mop gets mopping counted\n minion.mopped()\n else: #if they aren't mopping or cannot mop, they only clean\n minion.cleaned()\n # Add this assignment to the schedule\n working = (minion.getName(),minion.getCleanCount()) #make the name and clean count a tuple\n \n \n \n today.append(working)\n return today",
"def GetBonds(Bonds):\n b = sorted([(min(x), max(x)) for x in Bonds])\n Bonds13, Bonds14 = [], []\n for (a1,b1) in b:\n #check for bonds with a1 at the center of a 1-3 interaction,\n #letting b1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == a1 and b2 < b1] + \\\n [a2 for (a2,b2) in b if b2 == a1 and a2 < b1]\n Bonds13.extend([(min(c,b1), max(c,b1)) for c in clist])\n #check for bonds with b1 at the center of a 1-3 interaction,\n #letting a1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == b1 and b2 < a1] + \\\n [a2 for (a2,b2) in b if b2 == b1 and a2 < a1]\n Bonds13.extend([(min(c,a1), max(c,a1)) for c in clist])\n #find atoms connected to a1\n clist = [b2 for (a2,b2) in b if a1==a2 and not b1==b2] +\\\n [a2 for (a2,b2) in b if a1==b2 and not b1==a2]\n #find atoms connected to b1\n dlist = [a2 for (a2,b2) in b if b1==b2 and not a1==a2] +\\\n [b2 for (a2,b2) in b if b1==a2 and not a1==b2]\n Bonds14.extend([(min(c,d), max(c,d)) for c in clist for d in dlist])\n Bonds1213 = b + Bonds13\n #sort\n Bonds1213.sort()\n Bonds14.sort()\n #get unique values in case of loops\n Bonds1213 = [x for (i,x) in enumerate(Bonds1213) if i == 0 or x != Bonds1213[i-1]]\n Bonds14 = [x for (i,x) in enumerate(Bonds14) if i == 0 or x != Bonds14[i-1]]\n #convert to arrays \n Bonds1213 = array(Bonds1213, int)\n Bonds14 = array(Bonds14, int)\n return Bonds1213, Bonds14",
"def find_standby2(model, limit, w_a, w_b, w_m, demand_path, demand_dict, slink_dict):\n #print slink_dict\n #print \"FIND\"\n failed_dict = model.failed_dict\n #print failed_dict, limit\n dist_matrix = model.cost_dict['dist']\n rtt_matrix = model.cost_dict['rtt']\n bw_matrix = model.cost_dict['bw']\n #cpu_vector = model.cost_dict['cpu']\n selected_dict = {}\n \n # get aggregated residual bw for all substrate nodes, and store it as a list\n snode_bw_list = total_bw(bw_matrix)\n \n # get total capacity and used bw for each snode\n node_port_list, used_bw_list = total_port(model)\n #vnet_set = model.vnets\n sorted_vn = sort_vnet(model)\n\n #for vnet in vnet_set:\n if w_m[2] >= 10*w_m[1]:\n threshold = 0.8\n else:\n threshold = 0.8\n snode_traffic = {}\n for vn_traffic in sorted_vn:\n vnet = vn_traffic[0]\n failed_vr = failed_dict[vnet.vnet_id]\n if failed_vr != -1: \n # this node is failed\n standby_list = vnet.get_standby_ids()\n standby_cost = {}\n for s_vr in standby_list:\n dist_f = float(dist_matrix[s_vr][failed_vr + 1])\n failed_node = vnet.vnodes[failed_vr]\n vneighbors = failed_node.vneighbors\n dist_k = 0\n rtt_k = 0\n for k in vneighbors:\n dist_k += float(dist_matrix[s_vr][k + 1])\n rtt_k += float(rtt_matrix[s_vr][k + 1])\n\n connect_cost = w_b[0] * (w_a[0] * dist_f + w_a[1] * dist_k) +\\\n w_b[1] * rtt_k\n res_cost = snode_bw_list[s_vr]\n req_bw = sum(failed_node.neighbor_traffic.values())\n total = w_m[1] * connect_cost + w_m[2] * req_bw / res_cost\n standby_cost[s_vr] = total\n sorted_x = sorted(standby_cost.iteritems(), key=operator.itemgetter(1))\n #print \"SORTED\", sorted_x\n \n for item in sorted_x:\n if item[0] not in snode_traffic:\n #utilization = vn_traffic[1] / total_bw(bw_matrix)[item[0]]\n utilization = (vn_traffic[1] + used_bw_list[item[0]])/node_port_list[item[0]]\n else:\n #utilization = (snode_traffic[item[0]] + vn_traffic[1]) / total_bw(bw_matrix)[item[0]]\n utilization = (snode_traffic[item[0]] + vn_traffic[1] + used_bw_list[item[0]])/node_port_list[item[0]]\n #print utilization\n # Link-Path selsection add-on\n path_alloc = 1\n for k in vneighbors:\n demand_id = find_demand_id(demand_dict, vnet.vnet_id, failed_vr + 1,\n item[0] + 1, k + 1)\n demand = demand_dict[demand_id]['capacity']\n find, path = find_path(demand_path, demand_id, \n slink_dict, demand) \n if find == 0:\n print \"No available path between svr and nbr on the substrate network\" \n path_alloc = 0\n #print \"FIND PATH\", find, path\n # End link-path block \n #print \"ALLOCATED: \", path_alloc\n if path_alloc == 1:\n if selected_dict.values().count(item[0]) < limit:\n if utilization < threshold and w_m[2] >= 10*w_m[1]:\n if item[0] not in snode_traffic: \n selected_dict[vnet.vnet_id] = item[0]\n snode_bw_list[item[0]] -= vn_traffic[1]\n snode_traffic[item[0]] = vn_traffic[1]\n for slink_id in path:\n #print vn_traffic[1], slink_dict[slink_id]['capacity']\n slink_dict[slink_id]['capacity'] = slink_dict[slink_id]['capacity'] - vn_traffic[1]\n #print slink_dict[slink_id]['capacity']\n break;\n else:\n min_id = find_min(sorted_x, bw_matrix, snode_traffic, vn_traffic[1]) \n if min_id == item[0]:\n selected_dict[vnet.vnet_id] = item[0]\n snode_bw_list[item[0]] -= vn_traffic[1]\n snode_traffic[item[0]] += vn_traffic[1]\n for slink_id in path:\n #print vn_traffic[1],slink_dict[slink_id]['capacity']\n slink_dict[slink_id]['capacity'] = slink_dict[slink_id]['capacity'] - vn_traffic[1]\n #print slink_dict[slink_id]['capacity']\n #threshold = (threshold + 0.01)/2\n break\n elif utilization < threshold:\n 
selected_dict[vnet.vnet_id] = item[0]\n snode_bw_list[item[0]] -= vn_traffic[1]\n if item[0] not in snode_traffic: \n snode_traffic[item[0]] = vn_traffic[1]\n else:\n snode_traffic[item[0]] += vn_traffic[1]\n for slink_id in path:\n #print vn_traffic[1],slink_dict[slink_id]['capacity']\n slink_dict[slink_id]['capacity'] = slink_dict[slink_id]['capacity'] - vn_traffic[1]\n #print slink_dict[slink_id]['capacity']\n break\n else:\n print \"does not satisfy the threshold\" \n # if a svr is selected -- item[0]\n \n \n \n else:\n print \"cannot allocate paths\"\n \n #print slink_dict \n return selected_dict, slink_dict",
"def run_sc(no_prods, prev_ledg_update, list_of_workers, no_prod):\n \n list_of_rands = []\n\n for worker_info in reversed(list_of_workers):\n print(worker_info[0])\n if check_fees(worker_info[3]) == True:\n print(\"Worker \", worker_info[0], \"paid their fees\")\n\n elif check_fees(worker_info[3]) == False:\n \n print(\"Worker \", worker_info[0], \"did not pay their fees\")\n list_of_workers.remove(worker_info)\n \n continue \n \n if check_corr_rando(worker_info[1], worker_info[2], prev_ledg_update) == True:\n print(\"Worker \", worker_info[0], \"has a well formed random\")\n \n\n elif check_corr_rando(worker_info[1], worker_info[2], prev_ledg_update) == False:\n print(\"Worker \", worker_info[0], \"failed to produce a well formed random\")\n list_of_workers.remove(worker_info)\n\n continue\n \n\n list_of_rands.append(worker_info[1])\n\n global_rand = gen_big_rand(list_of_rands)\n\n if global_rand == 0:\n print(\"Something went wrong global_rand was 0\")\n\n dist_list = get_dist_from_big_rand(global_rand, list_of_workers) \n PIDs = find_prod_ids(dist_list, no_prod)\n\n for producer in PIDs:\n print (\"Worker -->\", producer, \"has been selected as a producer for this cycle\")",
"def fits(x, y):\n return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk",
"def brute_force_cow_transport(cows, limit=10):\n # TODO: Your code here\n comb = []\n for i in get_partitions(cows.keys()):\n comb.append(i)\n\n z = []\n for i in range(len(comb)):\n a = []\n for j in range(len(comb[i])):\n b = []\n for k in comb[i][j]:\n b.append(cows[k])\n if sum(b) > limit:\n break\n a.append(comb[i][j])\n if len(a) == len(comb[i]):\n z.append(a)\n\n num = []\n for i in range(len(z)):\n num.append(len(z[i]))\n\n for i in z:\n if len(i) == min(num):\n return i",
"def bridge_problem3(here):\r\n\r\n def all_over(state):\r\n here, _ = state\r\n return not here or here == set([\"light\"])\r\n\r\n start = (frozenset(here) | frozenset([\"light\"]), frozenset())\r\n return lowest_cost_search(start, bsuccessors2, all_over, bcost)",
"def numRescueBoats(self, people, limit):\n queue = collections.deque(sorted(people))\n count = 0\n while queue:\n count += 1\n last = queue.pop()\n if len(queue) >= 1:\n first = queue[0]\n if first + last <= limit:\n queue.popleft()\n return count",
"def _validateSpies(self, config, team, sabotaged):\r\n spies = [s for s in team if s in self.getSpies(config)]\r\n \"\"\"If there are more spies in our config than the number of sabotages made \r\n then return True, because this config is compatible with the sabotages made. \r\n Otherwise it is not compatible, so return False.\"\"\"\r\n return len(spies) >= sabotaged",
"def test_calculate_class_2_individuals_best_response_simulation_all_inds_in_one():\n all_individuals_to_first = calculate_class_2_individuals_best_response(\n lambda_2=0.3,\n lambda_1_1=0.1,\n lambda_1_2=3,\n mu_1=10,\n mu_2=2,\n num_of_servers_1=8,\n num_of_servers_2=4,\n threshold_1=6,\n threshold_2=3,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=10,\n seed_num_2=10,\n )\n assert all_individuals_to_first == 1\n\n all_individuals_to_second = calculate_class_2_individuals_best_response(\n lambda_2=0.3,\n lambda_1_1=3,\n lambda_1_2=0.1,\n mu_1=2,\n mu_2=10,\n num_of_servers_1=4,\n num_of_servers_2=8,\n threshold_1=3,\n threshold_2=6,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=10,\n seed_num_2=10,\n )\n assert all_individuals_to_second == 0",
"def slot_avaialble(application, approved_loans, K):\n return len(intersected_applications(application, approved_loans)) < K",
"def brute_force_cow_transport(cows,limit=10):\n trip_options = []\n\n for partition in get_partitions(cows.items()):\n ledger = [] #clear trips ledger between \n \n for trip in partition:\n trip_wt = sum(cow[1] for cow in trip)\n if trip_wt <= limit:\n ledger.append([cow[0] for cow in trip]) #adds list of names to list\n continue\n else: break #next partition but hits next line first...\n \n if len(ledger) == len(partition): #checks if above loop completed vs broke\n trip_options.append(ledger) \n\n return trip_options",
"def Checker(a,b,n,x):\n if n==0:\n if abs(a[0]-b[0])>=x: #if the changes in eta from one time step to another is more than .05mm\n return True #return true to continue the loop\n else:\n return False #stop the loop (this only happens if all of the points had a change of less than .05mm)\n elif abs(a[n]-b[n])>=x: #this checks each of the points in the channel \n return True #if any have too big a change the loop continues\n else: #if that point in the channel has small enough change\n Checker(a,b,n-1) #check the next point in the channel",
"def filter_rare_node(users, businesses, reviews, user_thresh, business_thresh, friend_thresh):\n continue_filter = True\n filtered_users = set()\n filtered_businesses = set()\n while continue_filter:\n continue_filter = False\n # filter step 1\n users_interact_ind = {}\n business_interact_ind = {}\n for review in reviews:\n user_id = review['user_id'] # a list\n business_id = review['business_id'] # a list\n users_interact_ind[user_id] = users_interact_ind.get(user_id, 0) + 1\n business_interact_ind[business_id] = business_interact_ind.get(business_id, 0) + 1\n\n filtered_review_users = set(u for u in users_interact_ind.keys() if users_interact_ind[u]>=user_thresh)\n filtered_review_businesses = set(b for b in business_interact_ind.keys() if business_interact_ind[b]>=business_thresh)\n \n # loop until users' reviews equal to filtered reviews\n if (filtered_users != filtered_review_users) or (filtered_businesses != filtered_review_businesses):\n continue_filter = True\n\n # filter step 2\n # filter user and business\n # make user_friends_dict, only those users with lots of friends can be included\n user_friends_dict = {}\n for user in users:\n user_id = user['user_id']\n if user_id not in filtered_review_users:\n continue\n if not user['friends']:\n continue\n filtered_friends = [friend.strip() for friend in user['friends'].split(',') if friend.strip() in filtered_review_users]\n if len(filtered_friends) >= friend_thresh:\n user_friends_dict[user_id] = filtered_friends # users with friends larger than friend_thresh\n\n continue_inside = True\n while continue_inside:\n friends = {}\n continue_inside = False\n for user, user_friends in user_friends_dict.items():\n filtered_friends = [friend for friend in user_friends if friend in user_friends_dict] # friend in user_friends_dict's keys\n if len(filtered_friends) >= friend_thresh:\n friends[user] = filtered_friends\n else:\n continue_inside = True\n user_friends_dict = deepcopy(friends) # this takes time\n\n filtered_users = set(user_friends_dict.keys())\n filtered_businesses_list = []\n\n for business in businesses:\n business_id = business['business_id']\n if business_id not in filtered_review_businesses:\n continue\n if not business['categories']:\n continue\n if not business['city']:\n continue\n filtered_businesses_list.append(business_id)\n filtered_businesses = set(filtered_businesses_list)\n\n filtered_review = []\n for review in reviews:\n if (review['user_id'] in filtered_users) and (review['business_id'] in filtered_businesses):\n filtered_review.append(review)\n reviews = deepcopy(filtered_review) # this takes time\n\n print(len(list(filtered_users)))\n print(len(list(filtered_businesses)))\n print(len(reviews))\n print('filter loop')\n\n print('filter complete')\n return filtered_users, filtered_businesses, filtered_review",
"def find_likely_transfers(dataframe):\n\n df_likely = dataframe[dataframe[\"Transfer_frequency\"] >= 0.80]\n\n return(df_likely)",
"def twoCitySchedCost(self, costs: List[List[int]]) -> int:\n def abs_diff_compare(x=[1,2], y=[3,4]):\n x1=x[0]-x[1] if x[0]>=x[1] else x[1]-x[0]\n y1=y[0]-y[1] if y[0]>=y[1] else y[1]-x[0]\n return x1 if x1>y1 else y1\n save_costs=costs\n costs.sort(reverse=True, key=abs_diff_compare)\n print(costs)\n nA=0\n nB=0\n tc=0\n num_people=len(costs)\n max_ppl=num_people//2\n \n for x in costs:\n if nA< max_ppl and nB< max_ppl :\n if x[0] <x[1]:\n nA+=1\n tc+=x[0]\n else:\n nB+=1\n tc+=x[1]\n elif nA< max_ppl:\n nA+=1\n tc+=x[0]\n elif nB< max_ppl:\n nB+=1\n tc+=x[1]\n \n \n return tc",
"def get_elim_candidates(df2, df1):\n if df1.loc[1,'Problem']!= problems[0]:\n return\n \n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(time_above[:time_order_av]))\n # return their 1-base index also:\n out = [(SEARCHES.index(c)+1, c) for c in elim_candidates]\n return out",
"def computeGoalKicks(my_goal, opponent_goal, ball, opponent, resolution):\n def avg(x):\n if len(x) == 0: return 0\n return sum(x)/len(x)\n\n def goal_score(goal, ball):\n goal_len = dist(*goal)\n D = dist(ball, goal[0]), dist(ball, goal[1])\n if min(D) > 200:\n return 0\n if D[0] <= goal_len and D[1] <= goal_len:\n return 1\n\n return 0\n if D[0] < D[1]:\n return D[0] / sum(D)\n else:\n return D[1] / sum(D)\n\n def scorefn(ball):\n my = goal_score(my_goal, ball)\n op = goal_score(opponent_goal, ball)\n print ball, my_goal, opponent_goal, my, op\n #assert my <= 0 or op <= 0\n return my + op\n\n successes = [ createRay(scorefn, resolution,\n opponent, ball, radians(angle), 1)\n for angle in range(360) ]\n\n w=0.5; n=9\n gauss1d = np.exp( -0.5 * w/n * np.array(range(-(n-1), n, 2))**2 )\n gauss1d /= sum(gauss1d)\n\n wraparound = int(np.ceil(n/2.0))\n wrapped = successes[-wraparound:] + successes + successes[:wraparound]\n convolved = np.convolve(gauss1d, successes, 'same')\n unwrapped = convolved[wraparound : len(successes)]\n\n return successes #unwrapped",
"def determine_jobs_per_pool(numpools, totaljobs):\n cluster = os.environ['CC_CLUSTER']\n if cluster in ['graham', 'beluga']:\n jobs_per_pool = math.floor(totaljobs / numpools)\n else:\n jobs_per_pool = totaljobs\n return jobs_per_pool",
"def get_companies_and_people(team):",
"def _resolve_ball_collisions(self) -> bool:\n\n bln_naughty = True\n lng_naughty_loop_count = 0\n lng_naughty_loop_limit = 10\n while bln_naughty:\n lng_naughty_loop_count += 1\n if lng_naughty_loop_count > lng_naughty_loop_limit:\n return False\n bln_naughty = False\n\n \"\"\" Ball vs Ball \"\"\"\n for sprBall1, sprBall2 in TrashyPhysics.collision_pairs_self(\n self.grpBalls, fncCollided=TrashyPhysics.balls_collided):\n bln_naughty = True\n TrashyPhysics.bounce_balls(sprBall1, sprBall2)\n\n \"\"\" Ball vs Bot \"\"\"\n for sprBall, sprRobot in TrashyPhysics.collision_pairs(\n self.grpBalls, self.grpRobots,\n fncCollided=TrashyPhysics.ball_robot_collided):\n bln_naughty = True\n TrashyPhysics.bounce_ball_off_bot(sprRobot, sprBall)\n\n \"\"\" Ball vs Wall \"\"\"\n for ball in filter(lambda x: TrashyPhysics.collided_wall(x), self.lstBalls):\n bln_naughty = True\n TrashyPhysics.bounce_ball_off_wall(ball)\n\n \"\"\" Ball vs Bumper \"\"\"\n # todo\n\n return True",
"def get_target_prime_family(target_prime_family_value: int) -> Tuple[int, str, List[int]]:\n number_of_digits = 1\n while True:\n print(f'Checking {number_of_digits} digit numbers...')\n primes_list = get_sorted_n_digit_primes(number_of_digits)\n primes_set = set(primes_list)\n checked_prime_families: Set[str] = set()\n\n for prime_number in primes_list:\n for prime_family in _get_number_families(prime_number):\n if prime_family not in checked_prime_families:\n checked_prime_families.add(prime_family)\n prime_family_primes = list(_get_number_family_primes(prime_family, primes_set))\n prime_family_value = len(prime_family_primes)\n if prime_family_value == target_prime_family_value:\n return prime_family_primes[0], prime_family, prime_family_primes\n\n number_of_digits += 1",
"def test_expected_growth(self):\r\n\r\n graph = nx.lollipop_graph(4, 1)\r\n graph.add_edge(4, 2)\r\n\r\n c = [3, 4]\r\n result = clique.search(c, graph, iterations=100)\r\n assert result == [0, 1, 2, 3]",
"def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict"
]
| [
"0.67352563",
"0.5475269",
"0.5326035",
"0.5268679",
"0.5139634",
"0.50883186",
"0.5016358",
"0.5006616",
"0.49998176",
"0.4940894",
"0.49228308",
"0.49202496",
"0.4916913",
"0.49167457",
"0.49084124",
"0.49008963",
"0.4895143",
"0.4878588",
"0.486629",
"0.48580933",
"0.4832985",
"0.48211643",
"0.48168105",
"0.4777841",
"0.47738466",
"0.47710785",
"0.47683907",
"0.47668457",
"0.47559425",
"0.4755212"
]
| 0.6558399 | 1 |
If there are X company families currently running plan b, AND config throttles to Y company families, AND Y is less than or equal to X, then we should find exactly 0 company families to run. | def test_get_company_families_in_need_of_plan_b_negative(self):
self.mock.max_simultaneous_plan_bs = 5
self.mock.currently_running_companies = [[1, "run_id1"], [2, "run_id2"], [3, "run_id3"], [4, "run_id4"], [5, "run_id5"]]
self.mock.company_families_in_need_of_plan_b = None
self.mox.ReplayAll()
# run, baby!
CompanyAnalyticsPlanBRunner._get_company_families_in_need_of_plan_b(self.mock)
self.assertIsNone(self.mock.company_families_in_need_of_plan_b)
self.assertEqual(self.mock.max_simultaneous_plan_bs_running, True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_company_families_in_need_of_plan_b_positive(self):\n self.mock.max_simultaneous_plan_bs = 5\n self.mock.currently_running_companies = [[1, \"run_id\"], [2, \"run_id\"], [3, \"run_id\"]]\n needs_plan_b_companies = [[4, None], [5, None], [6, None]]\n expected_number_of_tasks_to_create = 4\n expected_company_families_in_need_of_plan_b = [[4, None], [5, None]]\n\n self.mock._get_companies_in_need_of_plan_b().AndReturn(needs_plan_b_companies)\n self.mock._get_company_families_to_run(expected_number_of_tasks_to_create, needs_plan_b_companies).AndReturn(expected_company_families_in_need_of_plan_b)\n self.mox.ReplayAll()\n\n # run, baby!\n CompanyAnalyticsPlanBRunner._get_company_families_in_need_of_plan_b(self.mock)\n\n self.assertEqual(self.mock.company_families_in_need_of_plan_b, expected_company_families_in_need_of_plan_b)\n self.assertEqual(self.mock.max_simultaneous_plan_bs_running, False)",
"def get_available_companies_and_people(team):",
"def get_available_companies(team):",
"def __calculate_emptiness (self, scheduling_unit):\n difference = 0\n \n for date in self.workers.get_dates ( ):\n for person in self.mapper.get_all_people ( ):\n for turnus in self.mapper.get_turnuses (scheduling_unit, person):\n if person.is_scheduled_exact (scheduling_unit, turnus, date):\n difference += 1\n \n needed = self.workers.get_workers_by_type (date, scheduling_unit, turnus)\n \n difference -= needed\n return difference",
"def find_best_matching_url(self):\n\n if self.filter_kvks:\n overlap_kvk = self.company_df.index.intersection(set(self.filter_kvks))\n self.company_df = self.company_df.loc[overlap_kvk]\n\n # set flag for all kvk processed longer than older_time ago\n delta_time = self.current_time - self.company_df[DATETIME_KEY]\n mask = (delta_time >= self.older_time) | delta_time.isna()\n if not self.force_process and not self.rescan_missing_urls:\n self.company_df = self.company_df[mask.values]\n\n self.logger.info(\"Start finding best matching urls for proc {}\".format(self.i_proc))\n\n # count the number of none-processed queries (ie in which the processed flag == False\n # we have already imposed the max_entries option in the selection of the ranges\n self.logger.info(\"Counting all...\")\n if self.maximum_entries:\n max_queries = self.maximum_entries\n else:\n max_queries = self.company_df.index.size\n self.logger.info(\"Start processing {} queries\".format(max_queries))\n\n if self.progressbar and self.showbar:\n pbar = tqdm(total=max_queries, position=self.i_proc, file=sys.stdout)\n pbar.set_description(\"@{:2d}: \".format(self.i_proc))\n else:\n pbar = None\n\n start = time.time()\n # loop over all the companies kvk numbers\n cnt = 0\n for index, row in self.company_df.iterrows():\n\n # first check if we do not have to stop\n if self.maximum_entries is not None and cnt == self.maximum_entries:\n self.logger.info(\"Maximum entries reached\")\n break\n if os.path.exists(STOP_FILE):\n self.logger.info(\"Stop file found. Quit processing\")\n os.remove(STOP_FILE)\n break\n\n kvk_nummer = index\n company_name = get_string_name_from_df(NAME_KEY, row, index, self.company_df)\n self.logger.info(\"Processing {} ({})\".format(kvk_nummer, company_name))\n\n cnt += 1\n\n if self.search_urls:\n self.logger.info(\"Start a URL search for this company first\")\n\n # for this kvk, get the list of urls + the address info\n company_urls_df = self.website_df[self.website_df[KVK_KEY] == kvk_nummer].reset_index()\n company_addresses_df = self.address_df[self.address_df[KVK_KEY] == kvk_nummer]\n\n try:\n # match the url with the name of the company\n company_url_match = \\\n CompanyUrlMatch(company_record=row,\n kvk_nr=kvk_nummer,\n company_name=company_name,\n current_time=self.current_time,\n company_urls_df=company_urls_df,\n company_addresses_df=company_addresses_df,\n urls_df=self.url_df,\n imposed_urls=self.impose_url_for_kvk,\n distance_threshold=self.threshold_distance,\n string_match_threshold=self.threshold_string_match,\n i_proc=self.i_proc,\n store_html_to_cache=self.store_html_to_cache,\n max_cache_dir_size=self.max_cache_dir_size,\n internet_scraping=self.internet_scraping,\n force_ssl_check=self.force_ssl_check,\n older_time=self.older_time,\n timezone=self.timezone,\n exclude_extension=self.exclude_extension,\n filter_urls=self.filter_urls,\n force_process=self.force_process,\n rescan_missing_urls=self.rescan_missing_urls,\n logger=self.logger\n )\n\n self.logger.debug(\"Done with {}\".format(company_url_match.company_name))\n except pw.DatabaseError as err:\n self.logger.warning(f\"{err}\")\n self.logger.warning(\"skipping\")\n else:\n # succeeded the match. 
Now update the sql tables atomic\n self.update_sql_tables(kvk_nummer, company_url_match)\n\n if pbar:\n pbar.update()\n\n if pbar is not None:\n pbar.close()\n\n duration = time.time() - start\n self.logger.info(f\"Done processing in {duration} seconds\")\n # this is not faster than save per record\n # with Timer(\"Updating tables\") as _:\n # query = (Company.update(dict(url=Company.url, processed=Company.processed)))\n # query.execute()",
"def check_count():\n\n config = configparser.ConfigParser()\n config.read('config.ini')\n\n while True:\n try:\n for user in get_count_request():\n ip, count, protocol = str(user[0][0]), user[1][0], str(user[2][0])\n if count >= int(config[protocol]['Count Request']) and ip not in BLACK_LIST:\n BLACK_LIST.append(ip)\n logging.warning(ip)\n\n except Exception as e:\n logging.debug(e)",
"def slot_avaialble(application, approved_loans, K):\n return len(intersected_applications(application, approved_loans)) < K",
"def test_fav_6(self):\n\t\tplayer_list = [Player(\"Blake Base\", 1, 300000, 10), Player(\"Corey Catcher\", 2, 500000, 20), Player(\"Dexter Dugout\", 3, 200000, 50)]\n\t\tself.assertEqual( free_agent_vorp(player_list, 100000, 4), (0, 0, []) )",
"def problem077():\n\n cond = lambda n: num_prime_sum_ways(n) > 5000\n ans = next(filter(cond, itertools.count(2)))\n return ans",
"def _validateSpies(self, config, team, sabotaged):\r\n spies = [s for s in team if s in self.getSpies(config)]\r\n \"\"\"If there are more spies in our config than the number of sabotages made \r\n then return True, because this config is compatible with the sabotages made. \r\n Otherwise it is not compatible, so return False.\"\"\"\r\n return len(spies) >= sabotaged",
"def test_calculate_class_2_individuals_best_response_simulation_all_inds_in_one():\n all_individuals_to_first = calculate_class_2_individuals_best_response(\n lambda_2=0.3,\n lambda_1_1=0.1,\n lambda_1_2=3,\n mu_1=10,\n mu_2=2,\n num_of_servers_1=8,\n num_of_servers_2=4,\n threshold_1=6,\n threshold_2=3,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=10,\n seed_num_2=10,\n )\n assert all_individuals_to_first == 1\n\n all_individuals_to_second = calculate_class_2_individuals_best_response(\n lambda_2=0.3,\n lambda_1_1=3,\n lambda_1_2=0.1,\n mu_1=2,\n mu_2=10,\n num_of_servers_1=4,\n num_of_servers_2=8,\n threshold_1=3,\n threshold_2=6,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=10,\n seed_num_2=10,\n )\n assert all_individuals_to_second == 0",
"def brute_force_search_solution():\n return len(coin_search(TOTAL, COINS))",
"def _check_family(self):\n for (s, (b, c)), (cond, ref) in families.items():\n if s != self.SYMBOL or len(b) != self._.d:\n continue\n vars = tuple(set(sum(map(variables, b + c), ())))\n sols = _solve([SR(l) == r for l, r\n in zip(self._.b[:-1] + self._.c[1:], b + c)],\n vars)\n if any(checkConditions(cond, sol) for sol in sols\n if is_integral(sol)):\n raise InfeasibleError(refs=ref)",
"def numRescueBoats(self, people, limit):\n queue = collections.deque(sorted(people))\n count = 0\n while queue:\n count += 1\n last = queue.pop()\n if len(queue) >= 1:\n first = queue[0]\n if first + last <= limit:\n queue.popleft()\n return count",
"def test_no_counterfactuals_found(self):\n threshold = 4.0\n self._config['Regression threshold'] = str(threshold)\n self._example = {'x_1': 1.0, 'x_2': 1.0}\n output = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertEmpty(output)",
"def test_exhaustive_search_multiprocessing(self):\n\n self.params.settings.processes = 4\n\n self.params.exhaustive.output.csv_name = os.path.join(\n self.params.output.out_dir, \"test.csv\"\n )\n self.params.exhaustive.options.step = 0.2\n exhaustive(self.params)\n bound_occ, u_iso, fofc = get_minimum_fofc(\n self.params.exhaustive.output.csv_name\n )\n self.assertAlmostEqual(0.6, bound_occ)\n self.assertAlmostEqual(0.4, u_iso)",
"def check_nonzero_counters(origin_champs_counters_to_buy_, champions_list_):\n logging.debug(\"Function check_nonzero_counters() called\")\n\n nonzero_counters_list = []\n nonzero_counters_number_list = []\n for i, champ_counter in enumerate(origin_champs_counters_to_buy_):\n if champ_counter.get() >= 1:\n nonzero_counters_list.append(champ_counter)\n nonzero_counters_number_list.append(i)\n if champ_counter.get() >= 2:\n nonzero_counters_list.append(champ_counter)\n nonzero_counters_number_list.append(i)\n if champ_counter.get() >= 3:\n nonzero_counters_list.append(champ_counter)\n nonzero_counters_number_list.append(i)\n if champ_counter.get() >= 4:\n nonzero_counters_list.append(champ_counter)\n nonzero_counters_number_list.append(i)\n logging.info(\"Nonzero counters list human readable: \")\n for champ_index in nonzero_counters_number_list:\n logging.info(\"%s\", champions_list_[champ_index].name)\n logging.info(\"Nonzero counters indexes(return): %s\", nonzero_counters_number_list)\n logging.info(\"This is nonzero Counter list: %s\", nonzero_counters_list)\n\n logging.debug(\"Function check_nonzero_counters() end\")\n return nonzero_counters_number_list",
"def SA(targetMDG):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, i)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n return max_climber.result",
"def ConstrRank():\n with open(path.join(MAIN_PATH, RANK)) as f:\n ranked_data = []\n for line in f:\n ranked_data.append(line.strip().split()[0]) \n\n threshold = 5000\n global rank_less\n global rank_more\n rank_less = ranked_data[:threshold]\n rank_more = ranked_data[threshold:]\n\n with open(path.join(MAIN_PATH, INST)) as f:\n for line in f:\n line = line.strip().split(\",\")\n exists.append(line[0:2])",
"def is_unhappy(self):\n #checked!#\n ###your code here###\n same=0\n for i in self.home.neighbors:\n if i.occupant!=None:\n if i.occupant.group==self.group:\n same+=1\n happniess=float(same)/len(self.home.neighbors)\n if happniess<self.happiness_threshold:\n return True\n else:\n return False",
"def find_likely_transfers(dataframe):\n\n df_likely = dataframe[dataframe[\"Transfer_frequency\"] >= 0.80]\n\n return(df_likely)",
"def test_exhaustive_search(self):\n\n self.params.exhaustive.output.csv_name = os.path.join(\n self.params.output.out_dir, \"test.csv\"\n )\n self.params.exhaustive.options.step = 0.2\n exhaustive(self.params)\n bound_occ, u_iso, fofc = get_minimum_fofc(\n self.params.exhaustive.output.csv_name\n )\n self.assertAlmostEqual(0.6, bound_occ)\n self.assertAlmostEqual(0.4, u_iso)",
"def WCA_SA(targetMDG, WCAresult):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG, WCAresult))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n Temperature = 20\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, Temperature)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n if Temperature > 0:\n Temperature -= 0.5\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n max_climber.remove_empty_cluster()\n return max_climber.result",
"def fill_missing_channels(P8gen, max_total_br, decay_chains, epsilon=1e-6):\n top_level_particles = get_top_level_particles(decay_chains)\n for particle in top_level_particles:\n my_total_br = compute_total_br(particle, decay_chains)\n remainder = 1 - my_total_br / max_total_br\n assert(remainder > -epsilon)\n assert(remainder < 1 + epsilon)\n if remainder > epsilon:\n add_dummy_channel(P8gen, particle, remainder)",
"def matches(self, data, config):\n cnt = 0L\n for x in data:\n if config <= x:\n cnt += 1\n return cnt",
"def look_ahead(self, a, k, board):\n for i in range(board.dim):\n for j in range(board.dim):\n if not board.m[i][j]:\n if not self.localcount(a, k, board, pos=(j, i)):\n return []\n return self.localcount(a, k, board)",
"def test_sync_call_healthy_only(self):\n actors = [Actor.remote(i) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n results = []\n for _ in range(10):\n results.extend(manager.foreach_actor(lambda w: w.call()).ignore_errors())\n # Wait for actors to recover.\n wait_for_restore()\n\n # Notice that since we only fire calls against healthy actors,\n # we wouldn't be aware that the actors have been recovered.\n # So once an actor is taken out of the lineup (10% chance),\n # it will not go back in, and we should have few results here.\n # Basically takes us 7 calls to kill all the actors.\n # Note that we can hardcode 10 here because we are using deterministic\n # sequences of random numbers.\n self.assertEqual(len(results), 7)\n\n manager.clear()",
"def test_limits():\n # generate two locusts of points\n npts = 100\n epsilon = 0.000\n # #cluster 1\n coords1 = generate_locus_of_3d_points(npts, 0.1, 0.1, 0.1, epsilon=epsilon)\n # cluster 2\n coords2 = generate_locus_of_3d_points(npts, 0.9, 0.9, 0.9, epsilon=epsilon)\n\n # calculate dot product between vectors1 and cluster 2\n r = np.sqrt((0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2)\n\n # define radial bins\n rbins = np.array([0.0, 0.1, r + 2.0 * epsilon])\n\n # weighting 1\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 1, alignment=\"parallel\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected parallel result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], 1.0 * counts[-1], rtol=1.0 / npts), msg\n\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 1, alignment=\"perpendicular\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected perpendicular result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], 0.0 * counts[-1], atol=1.0 / npts), msg\n\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 1, alignment=\"antiparallel\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected antiparallel result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], -1.0 * counts[-1], rtol=1.0 / npts), msg\n\n # weighting 2\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 2, alignment=\"parallel\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected parallel result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], 1.0 * counts[-1], rtol=1.0 / npts), msg\n\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 2, alignment=\"perpendicular\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected perpendicular result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], -1.0 * counts[-1], atol=1.0 / npts), msg\n\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 2, alignment=\"antiparallel\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected antiparallel result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], 1.0 * counts[-1], rtol=1.0 / npts), msg\n\n # weighting 3\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 3, alignment=\"parallel\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected parallel result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], 0.0 * counts[-1], atol=1.0 / npts), msg\n\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 3, alignment=\"perpendicular\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected perpendicular result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], 0.0 * counts[-1], atol=1.0 / npts), msg\n\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 3, 
alignment=\"antiparallel\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected antiparallel result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], 0.0 * counts[-1], atol=1.0 / npts), msg\n\n # weighting 4\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 4, alignment=\"parallel\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected parallel result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], 1.0 * counts[-1], rtol=1.0 / npts), msg\n\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 4, alignment=\"perpendicular\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected perpendicular result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], 0.0 * counts[-1], atol=1.0 / npts), msg\n\n weighted_counts, counts = compute_limiting(\n coords1, coords2, npts, rbins, 4, alignment=\"antiparallel\"\n )\n msg = (\n \"weighted counts do not match \"\n \"expected antiparallel result given the weighting function\"\n + str(weighted_counts[-1])\n + \" \"\n + str(counts[-1])\n )\n assert np.isclose(weighted_counts[-1], 1.0 * counts[-1], rtol=1.0 / npts), msg",
"def validate_empty_branches(nanowire, min_free_branch, msg):\n score = 0\n valid = False\n\n for intersection in nanowire:\n free_b = 0\n for branch in intersection:\n min_free_pos = len(branch)\n free_p = 0\n for tup in branch:\n if not isinstance(tup, dict):\n continue\n if list(tup.values())[0] == 0:\n free_p += 1\n else:\n free_p = 0\n if free_p>=min_free_pos:\n free_b += 1\n if free_b>=min_free_branch:\n valid = True\n\n if valid:\n score += 1\n # if score==0:\n # raise exception.NoEmptyBranchException(msg)\n return score",
"def solve_p1_v1(target: int) -> int:\n\n elves = DeliveryService()\n\n house, n_presents = 0, 0\n while n_presents < target:\n house += 1\n n_presents = elves.visit(house)\n print(house, n_presents)\n\n return house"
]
| [
"0.6650879",
"0.53565973",
"0.5267262",
"0.5114545",
"0.503734",
"0.5020609",
"0.5002152",
"0.49946842",
"0.49886012",
"0.49717385",
"0.4968225",
"0.49662775",
"0.49640682",
"0.49217176",
"0.48952708",
"0.4893934",
"0.48644662",
"0.48617876",
"0.48533925",
"0.48433372",
"0.4838883",
"0.4830327",
"0.4827692",
"0.48212275",
"0.4803459",
"0.48022893",
"0.4793909",
"0.47926947",
"0.4791617",
"0.47874683"
]
| 0.70370907 | 0 |
Gets alignment alphabet for codon alignment. Only nucleotide alphabet is accepted. Raise an error when the type of alphabet is incompatible. | def get_codon_alphabet(alphabet, gap="-", stop="*"):
from Bio.Alphabet import NucleotideAlphabet
if isinstance(alphabet, NucleotideAlphabet):
alpha = alphabet
if gap:
alpha = Gapped(alpha, gap_char=gap)
if stop:
alpha = HasStopCodon(alpha, stop_symbol=stop)
else:
raise TypeError("Only Nuclteotide Alphabet is accepted!")
return alpha | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def alphabet(self):\n if(self.seq_type.upper()==\"DNA\"):\n return \"ATCG\"\n elif(self.seq_type.upper()==\"RNA\"):\n return \"AUCG\"\n elif(self.seq_type.upper()==\"PROTEIN\"):\n return \"ACDEFGHIKLMNPQRSTVWY\"\n else:\n return None",
"def alphabet(self):\r\n if self.__base_alphabet is None:\r\n from pydsl.Alphabet import Encoding\r\n self.__base_alphabet = Encoding(\"ascii\")\r\n return self.__base_alphabet",
"def get_alphabet(self):\n return self.alphabet",
"def getAlphabet(self):\n return self.alpha",
"def getAlphabet(self):\n return self.alpha",
"def getAlphabet(self):\n return self.alpha",
"def guess_align(aln):\n \n if \"pep\" in [guess_seq(seq) for seq in aln.itervalues()]:\n return \"pep\"\n else:\n return \"dna\"",
"def coding_strand_to_AA(dna):\n dna_codons = split_into_codons(dna)\n i = 0\n aa_string = ''\n while i < len(dna_codons):\n if len(dna_codons[i]) == 3:\n amino_acid = aa_table[dna_codons[i]]\n aa_string += amino_acid\n i += 1\n return aa_string",
"def coding_strand_to_AA(dna):\n #inital conditions\n protein = ''\n i = 0\n\n #for the length of DNA, translate each codon in an ORF to an amino acid\n while i < (len(dna)-2):\n codon = dna[i:i+3] \n amino_acid = aa_table[codon]\n protein= protein + amino_acid\n i += 3\n\n #return the string of amino acids\n return protein",
"def coding_strand_to_AA(dna):\n amino_acid = ''\n i = 0\n while i + 3 < len(dna) + 1:\n amino_acid += aa_table[dna[i:i+3]]\n i += 3\n return amino_acid",
"def coding_strand_to_AA(dna):\n list1 = get_codons(dna)\n string = ''\n for codon in list1:\n try:\n string = string + aa_table[codon]\n except KeyError:\n continue\n return string",
"def coding_strand_to_AA(dna):\n protein=''\n for i in range(0,len(dna),3):\n\t if dna[i:i+3] in aa_table.keys():\n\t \tprotein += aa_table[dna[i:i+3]]\n return protein",
"def coding_strand_to_AA(dna):\n dnainp = dna\n protein = ''\n if len(dnainp)<3:\n return \"ERROR: The provided fragment is too short to contain any codons.\"\n# elif len(dnainp)%3 is not 0:\n# print \"Warning: The provided DNA fragment does not contain an integer number of codons. Excess bases were leftout.\"\n while len(dnainp) >=3:\n cod = dnainp[:3]\n for i in codons:\n for j in i:\n if j == cod:\n protein = protein + aa[codons.index(i)]\n dnainp = dnainp[3:]\n return protein",
"def get_alphabet(number):\n return chr(number + 96)",
"def get_alphabet(number):\n return chr(number + 96)",
"def get_acid_name(seq):\n term_list = []\n for i in __kmers(seq,k=3):\n res = __get_key(i,full_amino_acid_name)\n term_list.append(res)\n return ''.join(term_list)",
"def coding_strand_to_AA(dna):\n coding_strand = ''\n for i in range(len(dna)/3):\n aa = dna[3*i:(3*i)+3]\n coding_strand += aa_table[aa]\n return coding_strand",
"def coding_strand_to_AA(dna):\n amino_acid=\"\"\n for i in range(0, len(dna), 3):\n mycodon=dna[i:i+3]\n # print'this is my codon'\n #print mycodon\n for j in range(len(codons)):\n for k in range(len(codons[j])):\n #print codons[j][k]\n if codons[j][k] == mycodon:\n #print aa[j]\n amino_acid += aa[j]\n return amino_acid\n \n #step uno break apart string into groups of three\n #find sequence +find index\n #then connect to amino acids ",
"def aa(seq):\n global codontable\n seq = seq.upper()\n if codontable is None:\n # TODO: figure out the right place for the pre-computed information here\n bases = ['T', 'C', 'A', 'G']\n codons = [a+b+c for a in bases for b in bases for c in bases]\n codons = codons + list(map(lambda x: x.lower(), codons))\n amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\n amino_acids = amino_acids + amino_acids.lower()\n codontable = dict(zip(codons, amino_acids))\n res = ''\n for i in range(0, len(seq) - 2, 3):\n res += codontable[seq[i:(i+3)]]\n return res",
"def checkAlphabet(self, count=10):\n if six.PY3:\n readLetters = super().checkAlphabet(count)\n else:\n readLetters = Read.checkAlphabet(self, count)\n if len(self) > 10 and readLetters.issubset(set('ACGT')):\n raise ValueError('It looks like a DNA sequence has been passed to '\n 'AARead().')\n return readLetters",
"def alphabet_chars(alphabet: List[Alphabet]):\n # perfect fits\n if np.sum([a in alphabet for a in [Alphabet.PERFECT_GW, Alphabet.PERFECT_IAM, Alphabet.PERFECT_RIMES]]) > 1:\n raise ValueError('multiple perfect fitting alphabets defeat purpose')\n if Alphabet.PERFECT_GW in alphabet:\n return ALPHABET_PERFECT_GW\n if Alphabet.PERFECT_IAM in alphabet:\n return ALPHABET_PERFECT_IAM\n if Alphabet.PERFECT_RIMES in alphabet:\n return ALPHABET_PERFECT_RIMES\n # universal alphabets\n alph_str = ''\n if Alphabet.ASCII_LOWER in alphabet:\n alph_str += string.ascii_lowercase\n if Alphabet.ASCII_UPPER in alphabet:\n alph_str += string.ascii_uppercase\n if Alphabet.ASCII_DIGITS in alphabet:\n alph_str += string.digits\n if Alphabet.ASCII_PUNCTUATION in alphabet:\n alph_str += string.punctuation\n return alph_str",
"def get_alphabet():\n\n alphabet = {}\n # Organized by how final output will look. ...alternative org isn't much better\n # May want to look into an external font solution TBH\n # Beware, the \" \" char is also basically the padding\n alphabet[\" \"] = [o,\n o,\n o,\n o,\n o]\n alphabet[\"A\"] = [o + X + o,\n X + o + X,\n X + X + X,\n X + o + X,\n X + o + X]\n alphabet[\"B\"] = [X + X + o,\n X + o + X,\n X + X + o,\n X + o + X,\n X + X + o]\n alphabet[\"C\"] = [X + X + X,\n X + o + o,\n X + o + o,\n X + o + o,\n X + X + X]\n alphabet[\"D\"] = [X + X + o,\n X + o + X,\n X + o + X,\n X + o + X,\n X + X + o]\n alphabet[\"E\"] = [X + X + X,\n X + o + o,\n X + X + X,\n X + o + o,\n X + X + X]\n alphabet[\"F\"] = [X + X + X,\n X + o + o,\n X + X + o,\n X + o + o,\n X + o + o]\n alphabet[\"G\"] = [X + X + X + X,\n X + o + o + o,\n X + o + X + X,\n X + o + o + X,\n X + X + X + X]\n alphabet[\"H\"] = [X + o + X,\n X + o + X,\n X + X + X,\n X + o + X,\n X + o + X]\n alphabet[\"I\"] = [X + X + X,\n o + X + o,\n o + X + o,\n o + X + o,\n X + X + X]\n alphabet[\"J\"] = [o + o + X,\n o + o + X,\n o + o + X,\n X + o + X,\n o + X + o]\n alphabet[\"K\"] = [X + o + o + X,\n X + o + X + o,\n X + X + o + o,\n X + o + X + o,\n X + o + o + X]\n alphabet[\"L\"] = [X + o + o,\n X + o + o,\n X + o + o,\n X + o + o,\n X + X + X]\n alphabet[\"M\"] = [X + o + o + o + X,\n X + X + o + X + X,\n X + o + X + o + X,\n X + o + o + o + X,\n X + o + o + o + X]\n alphabet[\"N\"] = [X + o + o + X,\n X + o + o + X,\n X + X + o + X,\n X + o + X + X,\n X + o + o + X]\n alphabet[\"O\"] = [X + X + X,\n X + o + X,\n X + o + X,\n X + o + X,\n X + X + X]\n alphabet[\"P\"] = [X + X + X,\n X + o + X,\n X + X + X,\n X + o + o,\n X + o + o]\n alphabet[\"Q\"] = [X + X + X,\n X + o + X,\n X + o + X,\n X + X + X,\n o + o + X]\n alphabet[\"R\"] = [X + X + X,\n X + o + X,\n X + X + X,\n X + X + o,\n X + o + X]\n alphabet[\"S\"] = [X + X + X,\n X + o + o,\n X + X + X,\n o + o + X,\n X + X + X]\n alphabet[\"T\"] = [X + X + X,\n o + X + o,\n o + X + o,\n o + X + o,\n o + X + o]\n alphabet[\"U\"] = [X + o + X,\n X + o + X,\n X + o + X,\n X + o + X,\n X + X + X]\n alphabet[\"V\"] = [X + o + X,\n X + o + X,\n X + o + X,\n o + X + o,\n o + X + o]\n alphabet[\"W\"] = [X + o + o + o + X,\n X + o + X + o + X,\n X + o + X + o + X,\n X + o + X + o + X,\n o + X + o + X + o]\n alphabet[\"X\"] = [X + o + X,\n X + o + X,\n o + X + o,\n X + o + X,\n X + o + X]\n alphabet[\"Y\"] = [X + o + X,\n X + o + X,\n o + X + o,\n o + X + o,\n o + X + o]\n alphabet[\"Z\"] = [X + X + X,\n o + o + X,\n o + X + o,\n X + o + o,\n X + X + X]\n alphabet[\"1\"] = [X + X + o,\n o + X + o,\n o + X + o,\n o + X + o,\n X + X + X]\n alphabet[\"2\"] = [X + X + X,\n o + o + X,\n X + X + X,\n X + o + o,\n X + X + X]\n alphabet[\"3\"] = [X + X + X,\n o + o + X,\n o + X + X,\n o + o + X,\n X + X + X]\n alphabet[\"4\"] = [X + o + X,\n X + o + X,\n X + X + X,\n o + o + X,\n o + o + X]\n alphabet[\"5\"] = [X + X + X,\n X + o + o,\n X + X + X,\n o + o + X,\n X + X + X]\n alphabet[\"6\"] = [X + X + X,\n X + o + o,\n X + X + X,\n X + o + X,\n X + X + X]\n alphabet[\"7\"] = [X + X + X,\n o + o + X,\n o + o + X,\n o + X + o,\n o + X + o]\n alphabet[\"8\"] = [X + X + X,\n X + o + X,\n X + X + X,\n X + o + X,\n X + X + X]\n alphabet[\"9\"] = [X + X + X,\n X + o + X,\n X + X + X,\n o + o + X,\n o + o + X]\n alphabet[\"0\"] = [X + X + X + X + X,\n X + o + o + X + X,\n X + o + X + o + X,\n X + X + o + o + X,\n X + X + X + X + X]\n\n return alphabet",
"def getCode1Letter(self):\n dataDict = self.__dict__\n cc = self.stdChemComp\n if cc is None:\n result = None\n else:\n result = cc.code1Letter\n return result",
"def GetAlphabet(self):\n alphabet = list(self._charAlphabet) #Creates a list of the alphabet characters\n numbers = [i for i in range(0,26)] #Creates a list of numbers up to 25\n numberOff = dict( zip(alphabet, numbers)) #Pairs each character with a number in a chronological sequence to number the characters from 0 to 25\n \n return numberOff",
"def coding_strand_to_AA(dna):\n num_codons = int(len(dna)/3)\n num = 0\n list_codons = []\n aacids = ''\n while num < num_codons:\n num_start = int(num*3)\n num_end = int(num*3 + 3)\n list_codons.append(dna[num_start:num_end])\n num = num + 1\n for element in list_codons:\n thing = aa_table[element]\n aacids = aacids + thing\n return aacids",
"def coding_strand_to_AA(dna):\n Seq = ''\n for i in range(0,len(dna),3): \n triple = dna[i:i+3]\n print triple\n for k in range(len(codons)):\n if triple in codons[k]: \n print \"Casey Rocks\"\n print codons[k]\n amino = aa[k]\n Seq+=amino\n return Seq",
"def coding_strand_to_AA(dna):\n s = \"\"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# intitialize empty list\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n for i in range(0, len(dna)-2, 3):\t\t\t\t\t\t\t\t\t\t\t\t\t\t# for range of length of dna, indexes w/ step 3 (to isolate codons)\n \t\tamino_acid = aa_table[dna[i:i+3]]\t\t\t\t\t\t\t\t\t\t\t\t# translates each codon to an amino acid\n \t\ts = s + amino_acid \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# adds amino acid to list\n return s \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# returns list of amino acids",
"def coding_strand_to_AA(dna):\n l = len(dna)\n res = []\n for i in range(0, l, 3):\n s = dna[i: i + 3]\n for j in range(len(codons)):\n# for codon in codons[j]:\n# if codon == s:\n# res.append(aa[j])\n# break;\n if s in codons[j]: # [WOW] Python is really nice unlike C, yay!!\n res.append(aa[j])\n return collapse(res)",
"def get_alphabet(choice) -> str:\n if choice == 'uppercase':\n return 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n elif choice == 'lowercase':\n return 'abcdefghijklmnopqrstuvwxyz'\n elif choice == 'alphabet':\n return get_alphabet('uppercase') + get_alphabet('lowercase')\n elif choice == 'number':\n return '0123456789'\n elif choice == 'alphanumeric':\n return get_alphabet('alphabet') + get_alphabet('number')\n elif choice == 'symbol':\n return r'~!@#$%^&*()-_=+[]{}\\|;:,<.>/?\"'\n elif choice == 'random':\n return get_alphabet('alphanumeric') + get_alphabet('symbol')",
"def rep_to_alphabet(alph_chars: str):\n # perfect fits\n if np.sum([a in alph_chars for a in ['gw', 'iam', 'rimes']]) > 1:\n raise ValueError('multiple perfect fitting alphabets defeat purpose')\n if 'gw' in alph_chars:\n return [Alphabet.PERFECT_GW]\n if 'iam' in alph_chars:\n return [Alphabet.PERFECT_IAM]\n if 'rimes' in alph_chars:\n return [Alphabet.PERFECT_RIMES]\n # universal alphabets\n alph_chars = alph_chars.lower()\n mapping = {'l': Alphabet.ASCII_LOWER,\n 'u': Alphabet.ASCII_UPPER,\n 'p': Alphabet.ASCII_PUNCTUATION,\n 'd': Alphabet.ASCII_DIGITS}\n alphabet = set()\n for c in alph_chars:\n alphabet.add(mapping[c])\n alphabet = list(alphabet)\n return alphabet"
]
| [
"0.73950106",
"0.6471855",
"0.64095986",
"0.62704057",
"0.62704057",
"0.62704057",
"0.624696",
"0.6093315",
"0.6060211",
"0.605598",
"0.60510933",
"0.6047748",
"0.5968646",
"0.5776322",
"0.5776322",
"0.5757764",
"0.5732608",
"0.5713657",
"0.5650256",
"0.56352204",
"0.56350553",
"0.55796456",
"0.55292517",
"0.55018884",
"0.5490156",
"0.5488772",
"0.54840547",
"0.53969425",
"0.53893656",
"0.534559"
]
| 0.66349137 | 1 |
Choose which generator and discriminator architecture to use by uncommenting one of these lines. | def GeneratorAndDiscriminator():
# Baseline (G: DCGAN, D: DCGAN)
return ResnetGenerator, DCGANDiscriminator
# No BN and constant number of filts in G
# return WGANPaper_CrippledDCGANGenerator, DCGANDiscriminator
# 512-dim 4-layer ReLU MLP G
# return FCGenerator, DCGANDiscriminator
# No normalization anywhere
# return functools.partial(DCGANGenerator, bn=False), functools.partial(DCGANDiscriminator, bn=False)
# Gated multiplicative nonlinearities everywhere
# return MultiplicativeDCGANGenerator, MultiplicativeDCGANDiscriminator
# tanh nonlinearities everywhere
# return functools.partial(DCGANGenerator, bn=True, nonlinearity=tf.tanh), \
# functools.partial(DCGANDiscriminator, bn=True, nonlinearity=tf.tanh)
# 101-layer ResNet G and D
# return ResnetGenerator, ResnetDiscriminator
raise Exception('You must choose an architecture!') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discriminator(self) -> str:",
"def discriminator(self) -> Any:\r\n return None",
"def discriminator(self, images): # pylint: disable=R0201\n return standard_discriminator(images)",
"def architecture(self):\n return self.random.choice([\n 'x86_64', \n 'x86'\n ])",
"def choose_net_arch(arch):\n model = getattr(models, arch)\n return model(pretrained=True)",
"def discriminator(self) -> undefined.UndefinedOr[str]:",
"def define_discriminator(image_shape=(256, 256, 1)):\n\n # weight initialization\n init = RandomNormal(stddev=0.02)\n # source image input\n in_src_image = Input(shape=image_shape)\n # target image input\n in_target_image = Input(shape=image_shape)\n # concatenate images channel-wise\n merged = Concatenate()([in_src_image, in_target_image])\n # C64\n d = Conv2D(64, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(merged)\n d = LeakyReLU(alpha=0.2)(d)\n # C128\n d = Conv2D(128, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # C256\n d = Conv2D(256, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # C512\n d = Conv2D(512, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # second last output layer\n d = Conv2D(512, (4, 4), padding='same', kernel_initializer=init)(d)\n d = BatchNormalization()(d)\n d = LeakyReLU(alpha=0.2)(d)\n # patch output\n d = Conv2D(1, (4, 4), padding='same', kernel_initializer=init)(d)\n patch_out = Activation('sigmoid')(d)\n # define model\n model = Model([in_src_image, in_target_image], patch_out)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt,\n loss_weights=[0.5])\n\n return model",
"def build_discriminator(self):\n img_shape = (self.img_size[0], self.img_size[1], self.channels)\n\n model = Sequential()\n ###############\n # Conv Stack 1:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, input_shape=img_shape, padding=\"same\")\n ) # 128x128 -> 64x64\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.2))\n\n ###############\n # Conv Stack 2:\n ###############\n model.add(\n Conv2D(128, kernel_size=5, strides=2, padding=\"same\")\n ) # 64x64 -> 32x32\n # model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 3:\n ###############\n model.add(\n Conv2D(128, kernel_size=4, strides=2, padding=\"same\")\n ) # 32x32 -> 16x16\n\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 4:\n ###############\n model.add(Conv2D(128, kernel_size=4, strides=1, padding=\"same\")) # 16x16 -> 8x8\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n # model.add(Dropout(0.25))\n\n ###############\n # Conv Stack 5:\n ###############\n model.add(Conv2D(128, kernel_size=3, strides=1, padding=\"same\")) # 8x8 -> 4x4\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dropout(0.4))\n\n model.add(Flatten())\n model.add(Dense(1, activation=\"sigmoid\")) # important binary classification.\n\n model.summary()\n\n # Model require Pair.\n img = Input(shape=img_shape)\n validity = model(img)\n\n return Model(img, validity)",
"def discriminator():\n\n # img = Input(shape=(28, 28, 1))\n # validity = ident(img)\n\n model = Model(img, validity)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=op1,\n metrics=['accuracy'])\n\n # model.summary()\n\n return model",
"def disresnet18(**kwargs):\n return Discriminator(resnetblocks.EresNetBasicBlock, resnetblocks.DresNetBasicBlock, [2, 2, 2, 2], **kwargs)",
"def discriminator(self, discriminator: str):\n pass # setter is ignored for discriminator property",
"def setup_training_discriminator(model):\n train_dir = os.path.join(FLAGS.log_root, \"train-discriminator\")\n if not os.path.exists(train_dir): os.makedirs(train_dir)\n\n model.build_graph() # build the graph\n\n saver = tf.train.Saver(max_to_keep=20) # we use this to load checkpoints for decoding\n sess = tf.Session(config=util.get_config())\n #init = tf.global_variables_initializer()\n #sess.run(init)\n util.load_ckpt(saver, sess, ckpt_dir=\"train-discriminator\")\n\n\n\n return sess, saver,train_dir",
"def discriminator_model():\n\n Discriminator = Sequential(name='Discriminator')\n\n # Downsampling : 32x32x3 --> 16x16x64\n Discriminator.add(Conv2D(filters=64, kernel_size=(5, 5), strides=2, padding='same', \n kernel_initializer=RandomNormal(stddev=GAUSS_SD), \n input_shape=DISCRIMINATOR_INPUT))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 16x16x64 --> 8x8x128\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 8x8x128 --> 4x4x256\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 4x4x256 --> 2x2x512\n Discriminator.add(Conv2D(filters=512, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Fully Connected Layer (classifier) , 2x2x512 (2048) --> 1\n Discriminator.add(Flatten())\n Discriminator.add(Dropout(DROPOUT))\n Discriminator.add(Dense(1))\n\n return Discriminator",
"def example():\n base_path = Path(TMPDIR)\n\n discriminator = Model(num_input=28 * 28)\n discriminator.add(Layer(512, activation=af.RELU))\n discriminator.add(Layer(1, activation=af.SIGMOID))\n\n generator_discriminator = Model(num_input=100)\n generator_discriminator.add(Layer(512, activation=af.LEAKY_RELU))\n generator_discriminator.add(Layer(28 * 28, activation=af.SIGMOID))\n generator_discriminator.add(Layer(512, activation=af.RELU)) # Needs to match discriminator\n generator_discriminator.add(Layer(1, activation=af.SIGMOID)) # Needs to match discriminator\n\n nn_discriminator = NeuralNetwork(discriminator, learning_rate=0.0002, cost_function=cf.CROSS_ENTROPY,\n\n optimizer=opt.ADAM,\n optimizer_settings=AdamOptimizer(beta1=0.5, beta2=0.999, epsilon=1e-8),\n batch_size=32)\n\n discriminator_weight_path = Path(DISCRIMINATOR_WEIGHTS_FILE_PATH)\n if discriminator_weight_path.exists():\n log.info(\"Discriminator weight file detected. Loading.\")\n nn_discriminator.load(discriminator_weight_path)\n\n nn_generator_discriminator = NeuralNetwork(generator_discriminator,\n use_layer_from=[{\"model\": nn_discriminator,\n \"layer_map\": [{\"from\": 1, \"to\": 3},\n {\"from\": 2, \"to\": 4}]}],\n\n learning_rate=0.0002, cost_function=cf.CROSS_ENTROPY, # Slower than D\n optimizer=opt.ADAM,\n optimizer_settings=AdamOptimizer(beta1=0.5, beta2=0.999, epsilon=1e-8),\n batch_size=32,\n weight_parameter=wparam(init_type=wparam.NORMAL, stddev=0.02))\n\n generator_weight_path = Path(GENERATOR_WEIGHTS_FILE_PATH)\n if generator_weight_path.exists():\n log.info(\"Generator weight file detected. Loading.\")\n nn_generator_discriminator.load(generator_weight_path)\n\n noise = np.random.normal(size=(NUM_IMAGES_TO_GENERATE, 100))\n\n print(\"Generating...\")\n test_images = nn_generator_discriminator.predict_intermediate(noise, 2)\n\n for p in range(test_images.shape[0]):\n img = test_images[p].reshape((28, 28)).copy()\n img *= 255.0\n img_pil = Image.fromarray(np.uint8(img))\n image_path = base_path / Path(\"%d.jpg\" % (p))\n img_pil.save(image_path)",
"def discriminator(self) -> str:\n return self.__class__.__name__",
"def define_discriminator(sample_size, code_size, hidden_size=50):\r\n input_1 = Input(shape=(sample_size, ))\r\n input_2 = Input(shape=(code_size, ))\r\n inputs = concatenate([input_1, input_2])\r\n # Define the discriminator Layers\r\n d = Dense(hidden_size, kernel_initializer='he_uniform', activation='tanh')(inputs)\r\n out_classifier = Dense(1, kernel_initializer='he_uniform', activation=\"sigmoid\")(d)\r\n d_model = Model([input_1, input_2], out_classifier)\r\n d_model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0005, beta_1=0.5))\r\n return d_model",
"def iterate_model_architecture_configs(options):\n for model_architecture in options[consts.MODEL_ARCHITECTURE]:\n config = options.copy()\n config[consts.MODEL_ARCHITECTURE] = model_architecture\n yield config",
"def make_architecture(self):\n self.arch = simple_mlp(num_inputs=self.p.model.num_inputs,\n num_outputs=self.p.model.num_outputs,\n params=self.p.model.arch)",
"def disresnet34(**kwargs):\n return Discriminator(resnetblocks.DresNetBasicBlock, [3, 4, 6, 3], **kwargs)",
"def discriminator_model_organs():\n # Initialize the weights\n init = tf.random_normal_initializer(0.0, 0.02)\n\n img_shape = (512, 512, 1)\n\n # Source and target image input\n source_img = tf.keras.Input(shape=img_shape)\n target_img = tf.keras.Input(shape=img_shape)\n\n # Concatenate images channel-wise\n src_tgt_img = Concatenate()([source_img, target_img]) # L: 512 x 512 x 1 # G: 256 x 256 x 1\n\n # C128\n d1 = Conv2D(filters=128, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n src_tgt_img) # L: 256 x 256 x 128 # G: 128 x 128 x 128 # RF: 4\n d1 = LeakyReLU(alpha=0.2)(d1)\n\n # C256\n d2 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d1) # L: 128 x 128 x 256 # G: 64 x 64 x 256 # RF: 10\n d2 = BatchNormalization()(d2)\n d2 = LeakyReLU(alpha=0.2)(d2)\n\n # C256\n d3 = Conv2D(filters=256, kernel_size=(4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(\n d2) # L: 64 x 64 x 256 # G: 32 x 32 x 256 # RF: 22\n d3 = BatchNormalization()(d3)\n d3 = LeakyReLU(alpha=0.2)(d3)\n\n # C512\n d4 = Conv2D(filters=512, kernel_size=(4, 4), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d3) # L: 61 x 61 x 512 # G: 29 x 29 x 512 # RF: 46\n d4 = BatchNormalization()(d4)\n d4 = LeakyReLU(alpha=0.2)(d4)\n d4 = ZeroPadding2D()(d4) # L: 63 x 63 x 512 # G: 31 x 31 x 512\n\n # Patch output\n d5 = Conv2D(filters=1, kernel_size=(4, 4), strides=(1, 1), padding='valid', kernel_initializer=init)(\n d4) # L: 60 x 60 x 1 # G: 28 x 28 x 1 # RF: 70\n output_patch = Activation('sigmoid')(d5)\n\n # Define model\n discriminator_model = tf.keras.Model([source_img, target_img], output_patch)\n return discriminator_model",
"def discriminator(self):\n\n # Initializate the neural network\n discriminator = Sequential()\n\n # Convolution, bias, activate\n discriminator.add(Conv2D(filters=self.first_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform',\n input_shape=self.image_shape))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.second_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n # Convolution\n discriminator.add(Conv2D(filters=self.third_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.last_layer_size,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n discriminator.add(Flatten())\n discriminator.add(Dense(1))\n discriminator.add(Activation('sigmoid'))\n\n optimizer = Adam(lr=self.lr, beta_1=self.beta)\n discriminator.compile(loss=self.loss,\n optimizer=optimizer,\n metrics=None)\n\n return discriminator",
"def build_discriminator(shape):\n input_img = Input(shape=(shape)) \n x = Conv2D(64, (3, 3), padding='same')(input_img)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(1, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Flatten()(x)\n o = Dense(1,activation='sigmoid')(x)\n Discriminator=Model(input_img,o,name='discriminator')\n return input_img,o,Discriminator",
"def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")",
"def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")",
"def make_discriminator():\n constraint_shape = Params.environment.constraint_shape()\n solution_shape = Params.environment.solution_shape()\n joint_shape = constraint_shape[:]\n joint_shape[0] += solution_shape[0]\n\n constraint_input = placeholder_node(\"constraint_input\", constraint_shape, 1)\n solution_input = placeholder_node(\"solution_input\", solution_shape, 1)\n joint_input = tf.concat([constraint_input, solution_input], 1)\n return (\n constraint_input,\n solution_input,\n FeedforwardNetwork(\n name=\"artificial_discriminator\",\n session=Params.session,\n input_shape=joint_shape,\n layer_shapes=Params.internal_layer_shapes + [[1]],\n activations=Params.activation,\n input_node=joint_input,\n save_location=Params.save_location,\n ),\n )",
"def __determine_config_type():",
"def build_discriminator(self):\n # label input\n in_label = Input(shape=(1,))\n # embedding for categorical input\n li = Embedding(self.n_classes, 50)(in_label)\n # scale up to image dimensions with linear activation\n n_nodes = self.in_shape[0] * self.in_shape[1]\n li = Dense(n_nodes)(li)\n # reshape to additional channel\n li = Reshape((self.in_shape[0], self.in_shape[1], 1))(li)\n # image input\n in_image = Input(shape=self.in_shape)\n # concat label as a channel\n merge = Concatenate()([in_image, li])\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(merge)\n fe = LeakyReLU(alpha=0.2)(fe)\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(fe)\n fe = LeakyReLU(alpha=0.2)(fe)\n # flatten feature maps\n fe = Flatten()(fe)\n # dropout\n fe = Dropout(0.4)(fe)\n # output\n out_layer = Dense(1, activation='sigmoid')(fe)\n # define model\n self.d_model = Model([in_image, in_label], out_layer)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.d_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])",
"def disresnet152(**kwargs):\n return Discriminator(resnetblocks.DresNetBottleneck, [3, 8, 36, 3], **kwargs)",
"def setup_net(self):\n\t\tself.src_net = get_model(self.base_model, num_cls=self.num_cls, \\\n\t\t\t\t\t\t\t\t l2_normalize=self.l2_normalize, temperature=self.temperature)\n\t\tself.tgt_net = self.custom_copy(self.src_net, self.weight_sharing)\n\n\t\tinput_dim = self.num_cls\n\t\tself.discriminator = nn.Sequential(\n\t\t\t\tnn.Linear(input_dim, 500),\n\t\t\t\tnn.ReLU(),\n\t\t\t\tnn.Linear(500, 500),\n\t\t\t\tnn.ReLU(),\n\t\t\t\tnn.Linear(500, 2),\n\t\t\t\t)\n\n\t\tself.image_size = self.src_net.image_size\n\t\tself.num_channels = self.src_net.num_channels",
"def __init__(self, num_gpus):\n\n super(Discriminator, self).__init__()\n n_in = IMG_CHANNELS\n n_out = 1\n\n feature_map = IMG_SIZE\n kernel_size = 4\n stride = 2\n padding = 1\n bias = False\n\n self.num_gpus = num_gpus\n\n self.network = nn.Sequential(\n # input is image\n nn.Conv2d(n_in, feature_map, kernel_size, stride, padding, bias=bias),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 2\n nn.Conv2d(feature_map, feature_map * 2, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 4\n nn.Conv2d(feature_map * 2, feature_map * 4, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 8\n nn.Conv2d(feature_map * 4, feature_map * 8, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = 1\n nn.Conv2d(feature_map * 8, n_out, kernel_size, 1, 0, bias=bias),\n nn.Sigmoid()\n )"
]
| [
"0.6232599",
"0.5788706",
"0.568151",
"0.56580734",
"0.56220806",
"0.556002",
"0.5512114",
"0.55085623",
"0.5438574",
"0.5433415",
"0.5423314",
"0.54211974",
"0.5416163",
"0.5398142",
"0.5363023",
"0.5358023",
"0.53553355",
"0.53334403",
"0.5310332",
"0.528972",
"0.5268406",
"0.5267039",
"0.5259861",
"0.5259861",
"0.5244364",
"0.52378976",
"0.5227908",
"0.5206359",
"0.5198257",
"0.5198028"
]
| 0.6607731 | 0 |
Test case for add_relation_type | def test_add_relation_type(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_relation_types(self):\n pass",
"def test_change_relation_type(self):\n pass",
"def test_change_relation_types(self):\n pass",
"def test_get_relation_type(self):\n pass",
"def test_find_relation_types(self):\n pass",
"def test_remove_relation_type(self):\n pass",
"def test_remove_relation_types(self):\n pass",
"def test07_add_type_triples(self):\n r = LDPRS('http://ex.org/abc')\n g = Graph()\n r.add_type_triples(g)\n self.assertEqual(len(g), 2)",
"def test_add_domain_type_assignment_rule(self):\n pass",
"def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")",
"def test_returns_correct_relation(self):\n self.assertEqual(type(self.Test.relation()).__name__, 'Relation')\n self.assertEqual(self.Test.relation().klass, self.Test)",
"def addRelation(klass, relation, rtype):\n if type(relation) is dict:\n if not relation.has_key('name'):\n msg = \"No key 'name' in the relation %s in class %s\" % (relation, klass.__name__)\n raise InvalidRelationshipError, msg\n name = relation['name']\n args = relation\n else:\n name = relation\n args = {}\n relationshipKlass = Relationship.TYPES[rtype]\n klass.RELATIONSHIP_CACHE[name] = (relationshipKlass, args)",
"def insert_rel(source, rel_type, target, change_list=None):\n print(\"Insert %s =%s=> %s\" % (source.id, rel_type, target.id))\n ss = source\n if [r for r in ss.synset_relations if r.target ==\n target.id and r.rel_type == rel_type]:\n print(\"Already exists\")\n return\n ss.synset_relations.append(SynsetRelation(target.id, rel_type))\n if change_list:\n change_list.change_synset(target)",
"def _create_new_relation_concept(self, rc_type, data_dict):\n # generate name, create individual with role assignments\n i = self.auto_generated_name_numbers[rc_type]\n self.auto_generated_name_numbers[rc_type] += 1\n relation_name = f\"i{rc_type.name}_{i}\"\n\n kwargs = {}\n for key, value in data_dict.items():\n res = self._handle_key_for_individual(key, value, relation_name, None)\n if res is not None:\n kwargs.update(res)\n\n relation_individual = self._create_individual(rc_type, relation_name, relation_name, label=None, kwargs=kwargs)\n\n return relation_individual",
"def test_add_asset_type_assignment_rule(self):\n pass",
"def test_type_validation(self):\r\n with self.assertRaises(ValidationError):\r\n TestSetModel.create(int_set={'string', True}, text_set={1, 3.0})",
"def test_related_add_existing_record(app, related_record, testdata):\n # Test language\n docs = testdata[\"documents\"]\n\n doc6 = Document.get_record_by_pid(docs[4][\"document_pid\"])\n doc9 = Document.get_record_by_pid(docs[7][\"document_pid\"])\n assert len(doc6.related.languages) == len(doc9.related.languages) == 2\n with pytest.raises(RelatedRecordError):\n doc6.related.add_language(doc9)\n assert len(doc6.related.languages) == len(doc9.related.languages) == 2\n\n # Test edition\n doc3 = Document.get_record_by_pid(docs[2][\"document_pid\"])\n doc4 = Document.get_record_by_pid(docs[3][\"document_pid\"])\n assert len(doc3.related.editions) == len(doc4.related.editions) == 4\n with pytest.raises(RelatedRecordError):\n doc4.related.add_edition(doc3)\n assert len(doc3.related.editions) == len(doc4.related.editions) == 4",
"def add_resource_relation_by_user(self, *, id: str,\n user_id: str,\n relation_type: UserResourceRel,\n resource_type: ResourceType) -> None:\n if resource_type not in resource_relation_model:\n raise NotImplementedError(f'The resource_type {resource_type.name} is not defined!')\n\n if relation_type not in resource_relation_model[resource_type]:\n raise NotImplementedError(f'the relation type {relation_type} is not defined!')\n\n res_rel_model = resource_relation_model[resource_type][relation_type]\n res_key = f'{resource_type.name.lower()}_rk'\n\n user_record = RDSUser(rk=user_id, email=user_id)\n res_rel_record = res_rel_model(user_rk=user_id)\n res_rel_record.__setattr__(res_key, id)\n try:\n with self.client.create_session() as session:\n session.merge(user_record)\n session.merge(res_rel_record)\n session.commit()\n except Exception as e:\n LOGGER.exception(f'Failed to create relation between user {user_id} and resource {id}')\n raise e",
"def test_single_named_link_with_custom_type():\n pass",
"def add_relation(wn, source, target, new_rel, change_list=None):\n insert_rel(source, new_rel, target, change_list)\n if new_rel in inverse_synset_rels:\n inv_rel_type = inverse_synset_rels[new_rel]\n insert_rel(target, inv_rel_type, source, change_list)",
"def _AddType(self, entity_type):\n if not entity_type.IsValid():\n self.AddFindings(entity_type.GetFindings())\n return False\n return self.local_namespace.InsertType(entity_type)",
"def test_related_add_existing_child(related_record, testdata):\n docs = testdata[\"documents\"]\n series = testdata[\"series\"]\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc6 = Document.get_record_by_pid(docs[4][\"document_pid\"])\n ser10 = Series.get_record_by_pid(series[1][\"series_pid\"])\n\n # Should fail if trying to add a child that already has relations\n with pytest.raises(RelatedRecordError):\n ser10.related.add_language(doc1)\n with pytest.raises(RelatedRecordError):\n ser10.related.add_language(doc6)",
"def add_relation(cls, row_id, rel_obj):\n obj = cls.query.filter_by(id=row_id).first()\n # obj = db.session.query(cls).filter_by(id=row_id).first()\n #print(type(obj))\n if cls.__name__ == 'Actor':\n obj.filmography.append(rel_obj)\n elif cls.__name__ == 'Movie':\n obj.cast.append(rel_obj)\n return commit(obj)",
"def test_ticket_type_add_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('ticket_type add new_type')\n rv, output = self._execute('ticket_type list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_related_add_same_language(app, testdata):\n doc1 = Document.get_record_by_pid(testdata[\"documents\"][0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(testdata[\"documents\"][1][\"document_pid\"])\n\n doc1.related.add_language(doc2)\n with pytest.raises(RelatedRecordError):\n doc1.related.add_language(doc2)",
"def test_relation_ways_inserted():\n park = query_row(db_conf, 'osm_landusages', -9201)\n assert park['type'] == 'park'\n assert park['name'] == '9209'\n\n # outer ways of multipolygon stand for their own\n road = query_row(db_conf, 'osm_roads', 9209)\n assert road['type'] == 'secondary'\n assert road['name'] == '9209'\n road = query_row(db_conf, 'osm_roads', 9210)\n assert road['type'] == 'residential'\n assert road['name'] == '9210'\n\n park = query_row(db_conf, 'osm_landusages', -9301)\n assert park['type'] == 'park'\n assert park['name'] == '' # no name on relation\n\n # outer ways of multipolygon stand for their own\n road = query_row(db_conf, 'osm_roads', 9309)\n assert road['type'] == 'secondary'\n assert road['name'] == '9309'\n road = query_row(db_conf, 'osm_roads', 9310)\n assert road['type'] == 'residential'\n assert road['name'] == '9310'",
"def test_change_domain_type_assignment_rule(self):\n pass",
"def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel",
"def test_accepts_relation(self):\n self.Test.scope('foo', self.Test.relation().where('foo'))\n self.assertEqual(self.Test.foo().params['where'], ['foo'])",
"def insert_drupal_relation(db_obj, db_cur, e1_entity_type, e1_entity_id,\n relation_cv, e2_entity_type, e2_entity_id):\n\n # relation details\n relation_ident = relation_cv[0]\n relation_type = relation_ident[1]\n if len(relation_ident) > 2:\n relation_field_name = relation_ident[2]\n relation_value_type = relation_cv[1]\n relation_value = relation_cv[2]\n\n # prepare for a transaction\n db_ac = db_obj.autocommit(None)\n db_obj.autocommit(False)\n\n # get the timestamp\n curr_time = int(time.time())\n\n # insert the data row for the relation\n query_str = (\n'''\nINSERT INTO relation\n(relation_type, vid, uid, created, changed, arity)\nVALUES\n(%s, 0, 1, %s, %s, 2)\n'''\n )\n query_args = [relation_type, curr_time, curr_time]\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=False):\n # won't be reached currently; script will exit on errors\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return (None, None, None)\n\n # get the new relation ID\n ret = db_obj.get_last_id(db_cur)\n if not ret[0]:\n # won't be reached currently; script will exit on errors\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return (None, None, None)\n rid = ret[1]\n\n # insert the revision row for the relation\n query_str = (\n'''\nINSERT INTO relation_revision\n(rid, relation_type, uid, changed, arity)\nVALUES\n(%s, %s, 1, %s, 2)\n'''\n )\n query_args = [rid, relation_type, curr_time]\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=False):\n # won't be reached currently; script will exit on errors\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return (None, None, None)\n\n # get the new revision ID\n ret = db_obj.get_last_id(db_cur)\n if not ret[0]:\n # won't be reached currently; script will exit on errors\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return (None, None, None)\n vid = ret[1]\n\n # update the relation row with the revision ID\n query_str = (\n'''\nUPDATE relation\nSET vid = %s\nWHERE rid = %s\n'''\n )\n query_args = [vid, rid]\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=False):\n # won't be reached currently; script will exit on errors\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return (None, None, None)\n\n # insert data and revision rows for the endpoints\n endpoints = [(0, e1_entity_type, e1_entity_id),\n (1, e2_entity_type, e2_entity_id)]\n for i, ep_entity_type, ep_entity_id in endpoints:\n for table_infix in ['data', 'revision']:\n # query string and arguments\n query_str = (\n'''\nINSERT INTO field_{0}_endpoints\n(entity_type, bundle, deleted, entity_id, revision_id, language, delta,\n endpoints_entity_type, endpoints_entity_id, endpoints_r_index)\nVALUES\n('relation', %s, 0, %s, %s, 'und', %s, %s, %s, %s)\n''' .\n format(table_infix)\n )\n query_args = [relation_type, rid, vid, i, ep_entity_type,\n ep_entity_id, i]\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=False):\n # won't be reached currently; script will exit on errors\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return (None, None, None)\n\n # finish the transaction\n ret = db_obj.commit()\n db_obj.autocommit(db_ac)\n if not ret:\n return (None, None, None)\n\n # key field\n if len(relation_ident) > 2:\n if not insert_drupal_field(db_obj, db_cur, 'relation',\n relation_type, rid, vid,\n (('field', relation_field_name),\n relation_value_type, relation_value)):\n return (False, rid, vid)\n\n # default 
field values\n f_defs = get_drupal_field_defaults(db_obj, db_cur, 'relation',\n relation_type)\n if f_defs is None:\n return (False, rid, vid)\n for f_def in f_defs:\n if not insert_drupal_field(db_obj, db_cur, 'relation',\n relation_type, rid, vid, f_def):\n return (False, rid, vid)\n\n return (True, rid, vid)"
]
| [
"0.9133543",
"0.8214896",
"0.812729",
"0.7941225",
"0.7591235",
"0.7403597",
"0.7184459",
"0.65345156",
"0.6493854",
"0.6439015",
"0.6344578",
"0.6220084",
"0.6152567",
"0.6058518",
"0.60384",
"0.6002407",
"0.59824777",
"0.5944071",
"0.58435047",
"0.5840866",
"0.584035",
"0.58365124",
"0.5827067",
"0.5797381",
"0.5778508",
"0.57536304",
"0.5719563",
"0.5710921",
"0.5690418",
"0.5676422"
]
| 0.94728637 | 0 |
Test case for add_relation_types | def test_add_relation_types(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_relation_type(self):\n pass",
"def test_change_relation_types(self):\n pass",
"def test_find_relation_types(self):\n pass",
"def test_change_relation_type(self):\n pass",
"def test_get_relation_type(self):\n pass",
"def test_remove_relation_types(self):\n pass",
"def test_remove_relation_type(self):\n pass",
"def test07_add_type_triples(self):\n r = LDPRS('http://ex.org/abc')\n g = Graph()\n r.add_type_triples(g)\n self.assertEqual(len(g), 2)",
"def test_add_domain_type_assignment_rule(self):\n pass",
"def test_type_validation(self):\r\n with self.assertRaises(ValidationError):\r\n TestSetModel.create(int_set={'string', True}, text_set={1, 3.0})",
"def test_get_types(self):\n pass",
"def add_types(conn, cur, types):\n\n print 'Adding types...',\n \n for type in types:\n cur.execute('INSERT INTO types VALUES (\"{_id}\", \"{name}\")'.format(\n _id = type['id'],\n name = type['name'],\n )\n )\n \n conn.commit()\n \n print 'done.'",
"def test_returns_correct_relation(self):\n self.assertEqual(type(self.Test.relation()).__name__, 'Relation')\n self.assertEqual(self.Test.relation().klass, self.Test)",
"def test_field_types(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n db_relations = connection.introspection.get_relations(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n expected_field_type = None\n if db_relations.get(i):\n expected_field_type = u'ForeignKey'\n else:\n expected_field_type = get_field_type(db_cols[i].type_code)\n\n self.assertEqual(\n mb_fields[i].get_internal_type(),\n expected_field_type\n )",
"def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")",
"def test_ticket_type_add_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('ticket_type add new_type')\n rv, output = self._execute('ticket_type list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_related_add_existing_record(app, related_record, testdata):\n # Test language\n docs = testdata[\"documents\"]\n\n doc6 = Document.get_record_by_pid(docs[4][\"document_pid\"])\n doc9 = Document.get_record_by_pid(docs[7][\"document_pid\"])\n assert len(doc6.related.languages) == len(doc9.related.languages) == 2\n with pytest.raises(RelatedRecordError):\n doc6.related.add_language(doc9)\n assert len(doc6.related.languages) == len(doc9.related.languages) == 2\n\n # Test edition\n doc3 = Document.get_record_by_pid(docs[2][\"document_pid\"])\n doc4 = Document.get_record_by_pid(docs[3][\"document_pid\"])\n assert len(doc3.related.editions) == len(doc4.related.editions) == 4\n with pytest.raises(RelatedRecordError):\n doc4.related.add_edition(doc3)\n assert len(doc3.related.editions) == len(doc4.related.editions) == 4",
"def test_get_relationship_templates(self):\n pass",
"def test_getCpfRelations(self):\n pass",
"def test_type_builder_handles_reference_types():\n schema = [\n SchemaObject(\n name=\"ObjectA\",\n properties=[SchemaReference(name=\"refB\", reference=\"ObjectB\")],\n ),\n SchemaObject(\n name=\"ObjectB\",\n properties=[SchemaReference(name=\"refC\", reference=\"ObjectC\")],\n ),\n SchemaObject(\n name=\"ObjectC\",\n properties=[SchemaValue(name=\"intValue\", value_type=\"number\")],\n ),\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 3\n assert build_result[0] == ClassDefinition(\n name=\"ObjectA\",\n properties=[\n PropertyDefinition(\n name=\"ref_b\", key=\"refB\", value_type=\"ObjectB\", known_type=False\n )\n ],\n depends_on={\"ObjectB\"},\n )\n assert build_result[1] == ClassDefinition(\n name=\"ObjectB\",\n properties=[\n PropertyDefinition(\n name=\"ref_c\", key=\"refC\", value_type=\"ObjectC\", known_type=False\n )\n ],\n depends_on={\"ObjectC\"},\n )\n assert build_result[2] == ClassDefinition(\n name=\"ObjectC\",\n properties=[\n PropertyDefinition(\n name=\"int_value\", key=\"intValue\", value_type=\"int\", known_type=True\n )\n ],\n depends_on=set(),\n )",
"def insert_rel(source, rel_type, target, change_list=None):\n print(\"Insert %s =%s=> %s\" % (source.id, rel_type, target.id))\n ss = source\n if [r for r in ss.synset_relations if r.target ==\n target.id and r.rel_type == rel_type]:\n print(\"Already exists\")\n return\n ss.synset_relations.append(SynsetRelation(target.id, rel_type))\n if change_list:\n change_list.change_synset(target)",
"def _AddType(self, entity_type):\n if not entity_type.IsValid():\n self.AddFindings(entity_type.GetFindings())\n return False\n return self.local_namespace.InsertType(entity_type)",
"def test_get_contact_person_types(self):\n pass",
"def _create_new_relation_concept(self, rc_type, data_dict):\n # generate name, create individual with role assignments\n i = self.auto_generated_name_numbers[rc_type]\n self.auto_generated_name_numbers[rc_type] += 1\n relation_name = f\"i{rc_type.name}_{i}\"\n\n kwargs = {}\n for key, value in data_dict.items():\n res = self._handle_key_for_individual(key, value, relation_name, None)\n if res is not None:\n kwargs.update(res)\n\n relation_individual = self._create_individual(rc_type, relation_name, relation_name, label=None, kwargs=kwargs)\n\n return relation_individual",
"def test_add_asset_type_assignment_rule(self):\n pass",
"def test_single_named_link_with_custom_type():\n pass",
"def addRelation(klass, relation, rtype):\n if type(relation) is dict:\n if not relation.has_key('name'):\n msg = \"No key 'name' in the relation %s in class %s\" % (relation, klass.__name__)\n raise InvalidRelationshipError, msg\n name = relation['name']\n args = relation\n else:\n name = relation\n args = {}\n relationshipKlass = Relationship.TYPES[rtype]\n klass.RELATIONSHIP_CACHE[name] = (relationshipKlass, args)",
"def test_related_add_existing_child(related_record, testdata):\n docs = testdata[\"documents\"]\n series = testdata[\"series\"]\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc6 = Document.get_record_by_pid(docs[4][\"document_pid\"])\n ser10 = Series.get_record_by_pid(series[1][\"series_pid\"])\n\n # Should fail if trying to add a child that already has relations\n with pytest.raises(RelatedRecordError):\n ser10.related.add_language(doc1)\n with pytest.raises(RelatedRecordError):\n ser10.related.add_language(doc6)",
"def test_relation_ways_inserted():\n park = query_row(db_conf, 'osm_landusages', -9201)\n assert park['type'] == 'park'\n assert park['name'] == '9209'\n\n # outer ways of multipolygon stand for their own\n road = query_row(db_conf, 'osm_roads', 9209)\n assert road['type'] == 'secondary'\n assert road['name'] == '9209'\n road = query_row(db_conf, 'osm_roads', 9210)\n assert road['type'] == 'residential'\n assert road['name'] == '9210'\n\n park = query_row(db_conf, 'osm_landusages', -9301)\n assert park['type'] == 'park'\n assert park['name'] == '' # no name on relation\n\n # outer ways of multipolygon stand for their own\n road = query_row(db_conf, 'osm_roads', 9309)\n assert road['type'] == 'secondary'\n assert road['name'] == '9309'\n road = query_row(db_conf, 'osm_roads', 9310)\n assert road['type'] == 'residential'\n assert road['name'] == '9310'",
"def test_add_source_type(self):\n # check if documentalist has access to create form\n self.login_documentalist()\n response = self.client.get('/type/new' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n form_data = { \n 'status': '0',\n 'acronym': 'site',\n 'name': 'Website',\n 'language' : 'pt-br',\n 'sourcetypelocal_set-TOTAL_FORMS': '0', \n 'sourcetypelocal_set-INITIAL_FORMS': '0',\n }\n\n response = self.client.post('/type/new', form_data, follow=True )\n \n self.assertRedirects(response, '/types')\n self.assertContains(response, \"Website\")"
]
| [
"0.93007845",
"0.8362059",
"0.80940545",
"0.80459434",
"0.7731855",
"0.7619851",
"0.73669165",
"0.6537363",
"0.62040925",
"0.61279976",
"0.5987119",
"0.5975522",
"0.5888174",
"0.5873747",
"0.5861829",
"0.5845702",
"0.5827523",
"0.57859147",
"0.57804865",
"0.5750537",
"0.5722008",
"0.5679166",
"0.5666676",
"0.56383955",
"0.56203896",
"0.56201077",
"0.5604516",
"0.559549",
"0.55848944",
"0.5560876"
]
| 0.9475202 | 0 |
Test case for change_relation_type | def test_change_relation_type(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_change_relation_types(self):\n pass",
"def test_add_relation_type(self):\n pass",
"def test_get_relation_type(self):\n pass",
"def test_add_relation_types(self):\n pass",
"def test_remove_relation_type(self):\n pass",
"def test_remove_relation_types(self):\n pass",
"def test_find_relation_types(self):\n pass",
"def test_returns_correct_relation(self):\n self.assertEqual(type(self.Test.relation()).__name__, 'Relation')\n self.assertEqual(self.Test.relation().klass, self.Test)",
"def test_change_domain_type_assignment_rule(self):\n pass",
"def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")",
"def test_change_asset_type_assignment_rule(self):\n pass",
"def test_relation_before_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', -50021)['type'] == 'park'",
"def change_type(self, change_type):\n\n self._change_type = change_type",
"def testType(self):\n def setType():\n self.node.type = 'banana'\n\n self.assertRaises(\n ValueError,\n setType\n )\n\n self.assertEqual(\n 'ccc',\n self.node.type\n )\n\n self.node._type = 'cdl'\n\n self.assertEqual(\n 'cdl',\n self.node.type\n )\n\n self.node.type = 'ccc'\n\n self.assertEqual(\n 'ccc',\n self.node.type\n )",
"def _replace_relations(obj, ci, side, field, other_ct, relation_type):\n used_relations = set()\n if getattr(obj, field):\n try:\n other = cdb.CI.objects.get(\n content_type=other_ct,\n object_id=getattr(obj, field).id,\n )\n kwargs = {'relation_type': relation_type}\n if side == 'child':\n kwargs['child'] = ci\n kwargs['parent'] = other\n else:\n kwargs['parent'] = ci\n kwargs['child'] = other\n used_relations.add(_create_or_update_relation(**kwargs).id)\n except cdb.CI.DoesNotExist:\n pass\n kwargs = {'type': relation_type}\n if side == 'child':\n kwargs['child'] = ci\n kwargs['parent__content_type'] = other_ct\n else:\n kwargs['parent'] = ci\n kwargs['child__content_type'] = other_ct\n cdb.CIRelation.objects.filter(**kwargs).exclude(\n id__in=used_relations\n ).delete()",
"def testTheType(self, theTestType):\n \n pass",
"def test_change_types(session, client, jwt, change_type, is_general_collateral):\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n json_data = copy.deepcopy(AMENDMENT_STATEMENT)\n json_data['changeType'] = change_type\n json_data['debtorName']['businessName'] = 'TEST BUS 2 DEBTOR'\n del json_data['createDateTime']\n del json_data['payment']\n del json_data['documentId']\n del json_data['amendmentRegistrationNumber']\n del json_data['courtOrderInformation']\n del json_data['addTrustIndenture']\n del json_data['removeTrustIndenture']\n\n if change_type in (model_utils.REG_TYPE_AMEND_ADDITION_COLLATERAL,\n model_utils.REG_TYPE_AMEND_SUBSTITUTION_COLLATERAL,\n model_utils.REG_TYPE_AMEND_PARIAL_DISCHARGE):\n del json_data['addSecuredParties']\n del json_data['deleteSecuredParties']\n del json_data['addDebtors']\n del json_data['deleteDebtors']\n if change_type == model_utils.REG_TYPE_AMEND_PARIAL_DISCHARGE:\n del json_data['addVehicleCollateral']\n del json_data['addGeneralCollateral']\n del json_data['deleteGeneralCollateral']\n elif change_type == model_utils.REG_TYPE_AMEND_ADDITION_COLLATERAL:\n del json_data['deleteVehicleCollateral']\n del json_data['deleteGeneralCollateral']\n if is_general_collateral:\n del json_data['addVehicleCollateral']\n else:\n del json_data['addGeneralCollateral']\n elif change_type == model_utils.REG_TYPE_AMEND_SUBSTITUTION_COLLATERAL:\n if is_general_collateral:\n del json_data['addVehicleCollateral']\n del json_data['deleteVehicleCollateral']\n else:\n del json_data['addGeneralCollateral']\n del json_data['deleteGeneralCollateral']\n if change_type in (model_utils.REG_TYPE_AMEND_DEBTOR_RELEASE,\n model_utils.REG_TYPE_AMEND_DEBTOR_TRANSFER,\n model_utils.REG_TYPE_AMEND_SP_TRANSFER):\n del json_data['addVehicleCollateral']\n del json_data['deleteVehicleCollateral']\n del json_data['addGeneralCollateral']\n del json_data['deleteGeneralCollateral']\n if change_type == model_utils.REG_TYPE_AMEND_DEBTOR_RELEASE:\n del json_data['addSecuredParties']\n del json_data['deleteSecuredParties']\n del json_data['addDebtors']\n elif change_type == model_utils.REG_TYPE_AMEND_DEBTOR_TRANSFER:\n del json_data['addSecuredParties']\n del json_data['deleteSecuredParties']\n elif change_type == model_utils.REG_TYPE_AMEND_SP_TRANSFER:\n del json_data['addDebtors']\n del json_data['deleteDebtors']\n\n base_reg_num = 'TEST0001'\n\n json_data['baseRegistrationNumber'] = base_reg_num\n # Set well known ids for deletes\n if 'deleteDebtors' in json_data:\n json_data['deleteDebtors'][0]['partyId'] = 200000024\n if 'deleteSecuredParties' in json_data:\n json_data['deleteSecuredParties'][0]['partyId'] = 200000026\n if 'deleteGeneralCollateral' in json_data:\n json_data['deleteGeneralCollateral'][0]['collateraId'] = 200000000\n if 'deleteVehicleCollateral' in json_data:\n json_data['deleteVehicleCollateral'][0]['vehicleId'] = 200000008\n\n rv = client.post('/api/v1/financing-statements/' + base_reg_num + '/amendments',\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE]),\n content_type='application/json')\n # check\n # print(rv.json)\n assert rv.status_code == HTTPStatus.CREATED\n assert 'amendmentRegistrationNumber' in rv.json",
"def test_accepts_relation(self):\n self.Test.scope('foo', self.Test.relation().where('foo'))\n self.assertEqual(self.Test.foo().params['where'], ['foo'])",
"def test_superType(self):\n self.assertTrue(ChangeType().superType is not None)",
"def test_type(self):\n self.assertEqual(type(self.base1), BaseModel)\n self.assertEqual(type(self.base2), BaseModel)",
"def test_getCpfRelations(self):\n pass",
"def test_add_domain_type_assignment_rule(self):\n pass",
"def test_getTypeName(self):\n self.assertEquals(ChangeType().getTypeName(),\n 'test.Change')",
"def test_relation_after_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', 50021) == None\n assert query_row(db_conf, 'osm_landusages', -50021) == None",
"def test_ticket_type_change_error_bad_type(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type change bad_type changed_type')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def validate_relation_data(self, sentry_unit, relation, expected):\n actual = sentry_unit.relation(relation[0], relation[1])\n self.log.debug('actual: {}'.format(repr(actual)))\n return self._validate_dict_data(expected, actual)",
"def test_relation_ways_inserted():\n park = query_row(db_conf, 'osm_landusages', -9201)\n assert park['type'] == 'park'\n assert park['name'] == '9209'\n\n # outer ways of multipolygon stand for their own\n road = query_row(db_conf, 'osm_roads', 9209)\n assert road['type'] == 'secondary'\n assert road['name'] == '9209'\n road = query_row(db_conf, 'osm_roads', 9210)\n assert road['type'] == 'residential'\n assert road['name'] == '9210'\n\n park = query_row(db_conf, 'osm_landusages', -9301)\n assert park['type'] == 'park'\n assert park['name'] == '' # no name on relation\n\n # outer ways of multipolygon stand for their own\n road = query_row(db_conf, 'osm_roads', 9309)\n assert road['type'] == 'secondary'\n assert road['name'] == '9309'\n road = query_row(db_conf, 'osm_roads', 9310)\n assert road['type'] == 'residential'\n assert road['name'] == '9310'",
"def test_returns_correct_relation(self):\n rel = self.Test.current_scope()\n assert not rel\n scoped = self.Test.relation().clone()\n self.Test._scoped_methods = [scoped]\n self.assertEqual(self.Test.current_scope(), scoped)",
"def target_type(self):",
"def test_type(self, test_type):\n self._test_type = test_type"
]
| [
"0.9128707",
"0.8218508",
"0.79304695",
"0.79276407",
"0.7637085",
"0.7394804",
"0.73342395",
"0.67068696",
"0.65558094",
"0.61641324",
"0.60887015",
"0.60078526",
"0.58326256",
"0.58233964",
"0.5810466",
"0.5723773",
"0.5695491",
"0.56949335",
"0.56716394",
"0.56711334",
"0.5670767",
"0.56697243",
"0.56565475",
"0.56403446",
"0.5640196",
"0.55548775",
"0.5487305",
"0.546395",
"0.5457568",
"0.54242074"
]
| 0.93498015 | 0 |
Test case for change_relation_types | def test_change_relation_types(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_change_relation_type(self):\n pass",
"def test_add_relation_types(self):\n pass",
"def test_add_relation_type(self):\n pass",
"def test_remove_relation_types(self):\n pass",
"def test_get_relation_type(self):\n pass",
"def test_find_relation_types(self):\n pass",
"def test_remove_relation_type(self):\n pass",
"def test_change_domain_type_assignment_rule(self):\n pass",
"def test_returns_correct_relation(self):\n self.assertEqual(type(self.Test.relation()).__name__, 'Relation')\n self.assertEqual(self.Test.relation().klass, self.Test)",
"def test_relation_before_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', -50021)['type'] == 'park'",
"def _replace_relations(obj, ci, side, field, other_ct, relation_type):\n used_relations = set()\n if getattr(obj, field):\n try:\n other = cdb.CI.objects.get(\n content_type=other_ct,\n object_id=getattr(obj, field).id,\n )\n kwargs = {'relation_type': relation_type}\n if side == 'child':\n kwargs['child'] = ci\n kwargs['parent'] = other\n else:\n kwargs['parent'] = ci\n kwargs['child'] = other\n used_relations.add(_create_or_update_relation(**kwargs).id)\n except cdb.CI.DoesNotExist:\n pass\n kwargs = {'type': relation_type}\n if side == 'child':\n kwargs['child'] = ci\n kwargs['parent__content_type'] = other_ct\n else:\n kwargs['parent'] = ci\n kwargs['child__content_type'] = other_ct\n cdb.CIRelation.objects.filter(**kwargs).exclude(\n id__in=used_relations\n ).delete()",
"def test_change_asset_type_assignment_rule(self):\n pass",
"def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")",
"def test_change_types(session, client, jwt, change_type, is_general_collateral):\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n json_data = copy.deepcopy(AMENDMENT_STATEMENT)\n json_data['changeType'] = change_type\n json_data['debtorName']['businessName'] = 'TEST BUS 2 DEBTOR'\n del json_data['createDateTime']\n del json_data['payment']\n del json_data['documentId']\n del json_data['amendmentRegistrationNumber']\n del json_data['courtOrderInformation']\n del json_data['addTrustIndenture']\n del json_data['removeTrustIndenture']\n\n if change_type in (model_utils.REG_TYPE_AMEND_ADDITION_COLLATERAL,\n model_utils.REG_TYPE_AMEND_SUBSTITUTION_COLLATERAL,\n model_utils.REG_TYPE_AMEND_PARIAL_DISCHARGE):\n del json_data['addSecuredParties']\n del json_data['deleteSecuredParties']\n del json_data['addDebtors']\n del json_data['deleteDebtors']\n if change_type == model_utils.REG_TYPE_AMEND_PARIAL_DISCHARGE:\n del json_data['addVehicleCollateral']\n del json_data['addGeneralCollateral']\n del json_data['deleteGeneralCollateral']\n elif change_type == model_utils.REG_TYPE_AMEND_ADDITION_COLLATERAL:\n del json_data['deleteVehicleCollateral']\n del json_data['deleteGeneralCollateral']\n if is_general_collateral:\n del json_data['addVehicleCollateral']\n else:\n del json_data['addGeneralCollateral']\n elif change_type == model_utils.REG_TYPE_AMEND_SUBSTITUTION_COLLATERAL:\n if is_general_collateral:\n del json_data['addVehicleCollateral']\n del json_data['deleteVehicleCollateral']\n else:\n del json_data['addGeneralCollateral']\n del json_data['deleteGeneralCollateral']\n if change_type in (model_utils.REG_TYPE_AMEND_DEBTOR_RELEASE,\n model_utils.REG_TYPE_AMEND_DEBTOR_TRANSFER,\n model_utils.REG_TYPE_AMEND_SP_TRANSFER):\n del json_data['addVehicleCollateral']\n del json_data['deleteVehicleCollateral']\n del json_data['addGeneralCollateral']\n del json_data['deleteGeneralCollateral']\n if change_type == model_utils.REG_TYPE_AMEND_DEBTOR_RELEASE:\n del json_data['addSecuredParties']\n del json_data['deleteSecuredParties']\n del json_data['addDebtors']\n elif change_type == model_utils.REG_TYPE_AMEND_DEBTOR_TRANSFER:\n del json_data['addSecuredParties']\n del json_data['deleteSecuredParties']\n elif change_type == model_utils.REG_TYPE_AMEND_SP_TRANSFER:\n del json_data['addDebtors']\n del json_data['deleteDebtors']\n\n base_reg_num = 'TEST0001'\n\n json_data['baseRegistrationNumber'] = base_reg_num\n # Set well known ids for deletes\n if 'deleteDebtors' in json_data:\n json_data['deleteDebtors'][0]['partyId'] = 200000024\n if 'deleteSecuredParties' in json_data:\n json_data['deleteSecuredParties'][0]['partyId'] = 200000026\n if 'deleteGeneralCollateral' in json_data:\n json_data['deleteGeneralCollateral'][0]['collateraId'] = 200000000\n if 'deleteVehicleCollateral' in json_data:\n json_data['deleteVehicleCollateral'][0]['vehicleId'] = 200000008\n\n rv = client.post('/api/v1/financing-statements/' + base_reg_num + '/amendments',\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE]),\n content_type='application/json')\n # check\n # print(rv.json)\n assert rv.status_code == HTTPStatus.CREATED\n assert 'amendmentRegistrationNumber' in rv.json",
"def test_getCpfRelations(self):\n pass",
"def test_field_types(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n db_relations = connection.introspection.get_relations(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n expected_field_type = None\n if db_relations.get(i):\n expected_field_type = u'ForeignKey'\n else:\n expected_field_type = get_field_type(db_cols[i].type_code)\n\n self.assertEqual(\n mb_fields[i].get_internal_type(),\n expected_field_type\n )",
"def test_ticket_type_change_error_bad_type(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type change bad_type changed_type')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_rename_visitor_type_coverage(self) -> None:\n type_sets = [\n RenameSchemaTypesVisitor.noop_types,\n RenameSchemaTypesVisitor.rename_types,\n ]\n all_types = {snake_to_camel(node_type) + \"Node\" for node_type in QUERY_DOCUMENT_KEYS}\n type_sets_union: Set[str] = set()\n for type_set in type_sets:\n self.assertTrue(type_sets_union.isdisjoint(type_set))\n type_sets_union.update(type_set)\n self.assertEqual(all_types, type_sets_union)",
"def test_type(self):\n self.assertEqual(type(self.base1), BaseModel)\n self.assertEqual(type(self.base2), BaseModel)",
"def test_add_domain_type_assignment_rule(self):\n pass",
"def test_0030_reactivate_datatypes_repository(self):\n installed_repository = self.test_db_util.get_installed_repository_by_name_owner(\n column_maker_repository_name, common.test_user_1_name\n )\n self.reactivate_repository(installed_repository)\n # This used to reactive datatype repositories and verify counts...\n # test may be considerably less useful now.",
"def test_getTypeName(self):\n self.assertEquals(ChangeType().getTypeName(),\n 'test.Change')",
"def testType(self):\n def setType():\n self.node.type = 'banana'\n\n self.assertRaises(\n ValueError,\n setType\n )\n\n self.assertEqual(\n 'ccc',\n self.node.type\n )\n\n self.node._type = 'cdl'\n\n self.assertEqual(\n 'cdl',\n self.node.type\n )\n\n self.node.type = 'ccc'\n\n self.assertEqual(\n 'ccc',\n self.node.type\n )",
"def testTheType(self, theTestType):\n \n pass",
"def test_relation_after_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', 50021) == None\n assert query_row(db_conf, 'osm_landusages', -50021) == None",
"def test_get_relationship_templates(self):\n pass",
"def change_type(self, change_type):\n\n self._change_type = change_type",
"def test_accepts_relation(self):\n self.Test.scope('foo', self.Test.relation().where('foo'))\n self.assertEqual(self.Test.foo().params['where'], ['foo'])",
"def test_superType(self):\n self.assertTrue(ChangeType().superType is not None)",
"def test_relation_ways_inserted():\n park = query_row(db_conf, 'osm_landusages', -9201)\n assert park['type'] == 'park'\n assert park['name'] == '9209'\n\n # outer ways of multipolygon stand for their own\n road = query_row(db_conf, 'osm_roads', 9209)\n assert road['type'] == 'secondary'\n assert road['name'] == '9209'\n road = query_row(db_conf, 'osm_roads', 9210)\n assert road['type'] == 'residential'\n assert road['name'] == '9210'\n\n park = query_row(db_conf, 'osm_landusages', -9301)\n assert park['type'] == 'park'\n assert park['name'] == '' # no name on relation\n\n # outer ways of multipolygon stand for their own\n road = query_row(db_conf, 'osm_roads', 9309)\n assert road['type'] == 'secondary'\n assert road['name'] == '9309'\n road = query_row(db_conf, 'osm_roads', 9310)\n assert road['type'] == 'residential'\n assert road['name'] == '9310'"
]
| [
"0.9235462",
"0.84471434",
"0.8330656",
"0.78718644",
"0.77977645",
"0.77977455",
"0.7726924",
"0.6432496",
"0.6326128",
"0.600536",
"0.5909689",
"0.58644736",
"0.58470744",
"0.5787336",
"0.57735205",
"0.57534206",
"0.5746941",
"0.5716575",
"0.57018447",
"0.5683999",
"0.5682092",
"0.5618198",
"0.56093353",
"0.5608985",
"0.5602591",
"0.55784035",
"0.5487384",
"0.54719245",
"0.5465614",
"0.54645777"
]
| 0.9392614 | 0 |
Test case for find_relation_types | def test_find_relation_types(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_relation_type(self):\n pass",
"def test_add_relation_types(self):\n pass",
"def test_add_relation_type(self):\n pass",
"def test_change_relation_types(self):\n pass",
"def test_change_relation_type(self):\n pass",
"def test_remove_relation_types(self):\n pass",
"def test_remove_relation_type(self):\n pass",
"def test_get_types(self):\n pass",
"def test_get_relationship_templates(self):\n pass",
"def test_getCpfRelations(self):\n pass",
"def test_get_contact_person_types(self):\n pass",
"def test_get_all_ancestor_types(self):\n pass",
"def test_returns_correct_relation(self):\n self.assertEqual(type(self.Test.relation()).__name__, 'Relation')\n self.assertEqual(self.Test.relation().klass, self.Test)",
"def test_select_by_concept_type(self, select_relationships):\n select_relationships.return_value = [Mock(subject='hello'), Mock(subject='kitty')]\n mock_concept_type = Mock(name='concept_type')\n\n result = FactQuery._select_by_concept_type(mock_concept_type)\n self.assertEqual(['hello', 'kitty'], result)\n select_relationships.assert_called_once_with('is', object_name=mock_concept_type)",
"def find_relationtype(self, relation):\n\t\treturn re.match('[a-z\\_]*(?=\\()',relation).group(0)",
"def test_field_types(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n db_relations = connection.introspection.get_relations(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n expected_field_type = None\n if db_relations.get(i):\n expected_field_type = u'ForeignKey'\n else:\n expected_field_type = get_field_type(db_cols[i].type_code)\n\n self.assertEqual(\n mb_fields[i].get_internal_type(),\n expected_field_type\n )",
"def test_select_by_concept_type__no_matches(self, select_relationships):\n select_relationships.return_value = []\n\n result = FactQuery._select_by_concept_type(Mock(name='concept_types'))\n self.assertEqual([], result)",
"def test_accepts_relation(self):\n self.Test.scope('foo', self.Test.relation().where('foo'))\n self.assertEqual(self.Test.foo().params['where'], ['foo'])",
"def testTypeDescendants(self):\n\n cmisClient = CmisClient(self.url, self.user, self.pwd,\n binding=self.binding,\n **self.ext_args)\n repo = cmisClient.getDefaultRepository()\n typeDefs = repo.getTypeDescendants()\n folderDef = None\n for typeDef in typeDefs:\n if typeDef.getTypeId() == 'cmis:folder':\n folderDef = typeDef\n break\n assert folderDef\n assert folderDef.baseId",
"def find_related_nodes(reltype, inst=None):\n if inst is None:\n inst = ctx.instance\n ret = []\n for rel in inst.relationships:\n if reltype in rel.type_hierarchy:\n ret.append(rel.target)\n return ret",
"def _get_derived_feature_types(self, limit):\n\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n raw = '/'.join((self.rawdir, 'feature_relationship'))\n logger.info(\"determining some feature types based on relationships\")\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n (feature_relationship_id, subject_id, object_id, type_id, rank,\n value) = line\n\n if int(type_id) in [133526, 129784]:\n # derived_tp_assoc_alleles\n self.feature_types[subject_id] = \\\n Genotype.genoparts['transgenic_insertion']\n sid = self.idhash['allele'].get(subject_id)\n model.addType(sid, self.feature_types[subject_id])\n elif int(type_id) in [133533, 129791]:\n # only take the derived_sf_assoc_alleles\n # my subject is a reagent_targeted_gene\n # my object is the dsRNA\n self.feature_types[subject_id] = \\\n Genotype.genoparts['reagent_targeted_gene']\n sid = self.idhash['allele'].get(subject_id)\n model.addType(sid, self.feature_types[subject_id])\n\n else:\n continue\n\n return",
"def test_get_related_nodes(self):\n pass",
"def findType(*args, deep: bool=True, exact: bool=True, forward: bool=True, type: AnyStr=\"\",\n **kwargs)->List[AnyStr]:\n pass",
"def test_filter_relationships_by_concept_type__object(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n object=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n object=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n object=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='object')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)",
"def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")",
"def test_get_contact_person_types_key(self):\n pass",
"def test_find_recommendation_type(self):\n recommendations = RecommendationFactory.create_batch(1)\n for recommendation in recommendations:\n recommendation.create()\n logging.debug(recommendations)\n\n # find the recommendation in the list\n recommendation = Recommendation.find(recommendations[0].product_id, recommendations[0].recommendation_product_id)\n self.assertIsNot(recommendation, None)\n self.assertEqual(recommendation.product_id, recommendations[0].product_id)\n self.assertEqual(recommendation.recommendation_product_id, recommendations[0].recommendation_product_id)\n self.assertEqual(recommendation.relationship, recommendations[0].relationship)",
"def get_drupal_relation_ids(db_obj, db_cur, e1_entity_type, e1_entity_id,\n relation_cv, e2_entity_type, e2_entity_id):\n\n # relation details\n relation_ident = relation_cv[0]\n relation_type = relation_ident[1]\n\n # handle key relation-field\n relation_field_join = ''\n relation_field_cond = ''\n relation_value_cond = ''\n if len(relation_ident) > 2:\n relation_field_name = relation_ident[2]\n relation_value_type = relation_cv[1]\n\n # field join\n relation_field_join = (\n 'LEFT JOIN field_data_field_{0} AS k_rf\\n'\n ' ON k_rf.entity_id = e2.entity_id\\n'\n ' AND k_rf.revision_id = e2.revision_id' .\n format(relation_field_name)\n )\n\n # conditions\n relation_field_cond = (\n \"AND k_rf.entity_type = 'relation'\\n\"\n \"AND k_rf.deleted = 0\"\n )\n\n # handle value type\n if relation_value_type.startswith('term: '):\n relation_key_column = 'k_rf_t.name'\n relation_field_join += (\n '\\nLEFT JOIN taxonomy_term_data AS k_rf_t\\n'\n 'ON k_rf_t.tid = k_rf.field_{0}_tid' .\n format(relation_field_name)\n )\n elif relation_value_type == 'ip':\n relation_key_column = (\n 'k_rf.field_{0}_start'.format(relation_field_name)\n )\n else:\n relation_key_column = (\n 'k_rf.field_{0}_value'.format(relation_field_name)\n )\n\n # handle specified field value\n if len(relation_cv) > 2:\n relation_value = relation_cv[2]\n relation_value_cond = (\n 'AND {0} = %s'.format(relation_key_column)\n )\n\n # query string and arguments\n query_str = (\n'''\nSELECT e1.entity_id, e1.revision_id\nFROM field_data_endpoints AS e1\nLEFT JOIN field_data_endpoints AS e2\n ON e2.entity_id = e1.entity_id\n AND e2.revision_id = e1.revision_id\n AND e2.endpoints_r_index > e1.endpoints_r_index\n{0}\nWHERE e1.revision_id IN\n (SELECT MAX(vid)\n FROM relation_revision\n GROUP BY rid)\nAND e1.entity_type = 'relation'\nAND e1.bundle = %s\nAND e1.endpoints_entity_type = %s\nAND e1.endpoints_entity_id = %s\nAND e1.deleted = 0\nAND e2.endpoints_entity_type = %s\nAND e2.endpoints_entity_id = %s\nAND e2.deleted = 0\n{1}\n{2}\n''' .\n format(relation_field_join, relation_field_cond,\n relation_value_cond)\n )\n query_args = [relation_type, e1_entity_type, e1_entity_id,\n e2_entity_type, e2_entity_id]\n if len(relation_ident) > 2 and len(relation_cv) > 2:\n query_args.append(relation_value)\n\n # execute the query\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=True):\n return None\n ret = db_obj.fetchall(db_cur)\n if not ret[0]:\n return None\n if not ret[1]:\n return []\n return ret[1]",
"def test_get_transaction_types(self):\n pass",
"def find_type(source, target):\n x = [r for r in source.synset_relations if r.target == target.id]\n if len(x) != 1:\n raise Exception(\n \"Synsets not linked or linked by more than one property\")\n return x[0].rel_type"
]
| [
"0.82754517",
"0.7830468",
"0.7545067",
"0.74384326",
"0.7220789",
"0.7081365",
"0.67301106",
"0.6446792",
"0.63950753",
"0.6385406",
"0.6292394",
"0.6291715",
"0.614222",
"0.61386603",
"0.6076577",
"0.6062812",
"0.59512234",
"0.5938554",
"0.5795784",
"0.5777835",
"0.5776928",
"0.57091314",
"0.56767404",
"0.56172633",
"0.5610646",
"0.5568756",
"0.55616015",
"0.55042446",
"0.54952025",
"0.5445048"
]
| 0.93213457 | 0 |
Test case for get_relation_type | def test_get_relation_type(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_find_relation_types(self):\n pass",
"def test_change_relation_type(self):\n pass",
"def test_add_relation_type(self):\n pass",
"def test_change_relation_types(self):\n pass",
"def test_add_relation_types(self):\n pass",
"def test_returns_correct_relation(self):\n self.assertEqual(type(self.Test.relation()).__name__, 'Relation')\n self.assertEqual(self.Test.relation().klass, self.Test)",
"def find_relationtype(self, relation):\n\t\treturn re.match('[a-z\\_]*(?=\\()',relation).group(0)",
"def test_remove_relation_type(self):\n pass",
"def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")",
"def test_remove_relation_types(self):\n pass",
"def _reltype(self):\n return self.__reltype",
"def test_get_relationship_templates(self):\n pass",
"def get_relationship_type(\n r: Union[\"ModelRelationship\", t.RelationshipType, t.RelationshipName, str]\n) -> t.RelationshipType:\n relationship_type = r.type if isinstance(r, ModelRelationship) else r\n return t.RelationshipType(normalize_relationship_type(relationship_type))",
"def test_get_node_type_name(self):\n pass",
"def test_getCpfRelations(self):\n pass",
"def test_get_contact_person_types(self):\n pass",
"def test_get_types(self):\n pass",
"def test_get_parent_type_name(self):\n pass",
"def get_relation(srt, soort):\n result, multiple = None, None\n if srt != soort or soort in ('funcproc', 'techproc'):\n for relobj in my.rectypes[srt]._meta.get_fields():\n if relobj.related_model and corr_naam(relobj.related_model._meta.model_name) == soort:\n result = relobj.name\n multiple = False if relobj.get_internal_type() == 'ForeignKey' else True\n break\n return result, multiple",
"def test_accepts_relation(self):\n self.Test.scope('foo', self.Test.relation().where('foo'))\n self.assertEqual(self.Test.foo().params['where'], ['foo'])",
"def find_type(source, target):\n x = [r for r in source.synset_relations if r.target == target.id]\n if len(x) != 1:\n raise Exception(\n \"Synsets not linked or linked by more than one property\")\n return x[0].rel_type",
"def target_type(self):",
"def test_companies_company_id_connections_connection_id_options_data_type_get(self):\n pass",
"def _get_derived_feature_types(self, limit):\n\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n raw = '/'.join((self.rawdir, 'feature_relationship'))\n logger.info(\"determining some feature types based on relationships\")\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n (feature_relationship_id, subject_id, object_id, type_id, rank,\n value) = line\n\n if int(type_id) in [133526, 129784]:\n # derived_tp_assoc_alleles\n self.feature_types[subject_id] = \\\n Genotype.genoparts['transgenic_insertion']\n sid = self.idhash['allele'].get(subject_id)\n model.addType(sid, self.feature_types[subject_id])\n elif int(type_id) in [133533, 129791]:\n # only take the derived_sf_assoc_alleles\n # my subject is a reagent_targeted_gene\n # my object is the dsRNA\n self.feature_types[subject_id] = \\\n Genotype.genoparts['reagent_targeted_gene']\n sid = self.idhash['allele'].get(subject_id)\n model.addType(sid, self.feature_types[subject_id])\n\n else:\n continue\n\n return",
"def _parse_relation(chunk, type=\"O\"):\n r1 = chunk.get(XML_RELATION)\n r2 = chunk.get(XML_ID, chunk.get(XML_OF))\n r1 = [x != \"-\" and x or None for x in r1.split(\"|\")] or [None]\n r2 = [x != \"-\" and x or None for x in r2.split(\"|\")] or [None]\n r2 = [x is not None and x.split(_UID_SEPARATOR )[-1] or x for x in r2]\n if len(r1) < len(r2): r1 = r1 + r1 * (len(r2)-len(r1)) # [1] [\"SBJ\", \"OBJ\"] => \"SBJ-1;OBJ-1\"\n if len(r2) < len(r1): r2 = r2 + r2 * (len(r1)-len(r2)) # [2,4] [\"OBJ\"] => \"OBJ-2;OBJ-4\"\n return \";\".join([\"-\".join([x for x in (type, r1, r2) if x]) for r1, r2 in zip(r1, r2)])",
"def test_getEntityType(self):\n cases = [\n (self.test_eac + \"NE00001.xml\",\"concept\"),\n (self.test_eac + \"NE00700.xml\",\"concept\"),\n (self.test_eac + \"NE01400.xml\",\"corporateBody\"),\n (self.test_eac + \"NE00301.xml\",\"corporateBody\"),\n (self.test_eac + \"NE01201.xml\",\"person\"),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com')\n self.assertNotEqual(doc, None)\n result = doc.getEntityType()\n self.assertNotEqual(result, None)\n self.assertEqual(result, expected)",
"def test_type(self):\n self.assertEqual(type(self.base1), BaseModel)\n self.assertEqual(type(self.base2), BaseModel)",
"def __get_linkage_type(self):\n return random.choice(self.LINKAGE_TYPE)",
"def test_get(self):\n self.assertEqual(self.expected_described_model, self.mapped_model.get(\"described_model_type\"))",
"def calculate_type(\n *, schemas: types.Schemas, schema: types.Schema\n) -> types.PropertyType:\n json_value = peek.json(schema=schema, schemas=schemas)\n if json_value is True:\n return types.PropertyType.JSON\n\n property_type = peek.type_(schema=schema, schemas=schemas)\n if property_type in type_.SIMPLE_TYPES:\n return types.PropertyType.SIMPLE\n\n read_only_value = peek.read_only(schema=schema, schemas=schemas)\n if read_only_value is True:\n return types.PropertyType.BACKREF\n\n return types.PropertyType.RELATIONSHIP"
]
| [
"0.8218984",
"0.79172564",
"0.7896453",
"0.748885",
"0.74482447",
"0.7196202",
"0.70477825",
"0.6910506",
"0.66051215",
"0.6566184",
"0.6536505",
"0.64464283",
"0.6385503",
"0.6298449",
"0.6128046",
"0.6045418",
"0.5934041",
"0.58843106",
"0.58825374",
"0.5873009",
"0.5872792",
"0.5801942",
"0.57507384",
"0.57065684",
"0.5638801",
"0.5636226",
"0.5635198",
"0.5634862",
"0.5632159",
"0.5602851"
]
| 0.93895334 | 0 |
Test case for remove_relation_type | def test_remove_relation_type(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_remove_relation_types(self):\n pass",
"def test_change_relation_type(self):\n pass",
"def test_relation_before_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', -50021)['type'] == 'park'",
"def test_relation_after_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', 50021) == None\n assert query_row(db_conf, 'osm_landusages', -50021) == None",
"def test_change_relation_types(self):\n pass",
"def test_add_relation_type(self):\n pass",
"def test_add_relation_types(self):\n pass",
"def test_get_relation_type(self):\n pass",
"def test_find_relation_types(self):\n pass",
"def test__removeRelObject(t):\n t.adm._removeRelObject(\"device\", \"objmap\", \"relname\")",
"def test_ticket_type_remove_error_bad_type(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type remove bad_type')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def delete_drupal_relation(db_obj, db_cur, e1_entity_type, e1_entity_id,\n relation_cv, e2_entity_type, e2_entity_id):\n\n # relation details\n relation_ident = relation_cv[0]\n relation_type = relation_ident[1]\n if len(relation_ident) > 2:\n relation_field_name = relation_ident[2]\n relation_value_type = relation_cv[1]\n relation_value = relation_cv[2]\n\n # get the relation's IDs\n ret = get_drupal_relation_ids(db_obj, db_cur, e1_entity_type,\n e1_entity_id, relation_cv, e2_entity_type,\n e2_entity_id)\n if ret is None or (len(ret) > 1):\n if ret is None:\n problem = 'could not get the IDs of'\n else:\n problem = 'multiple entries found for'\n if len(relation_ident) > 2:\n msg = (\n'''Warning: {0} the following relation:\n type: {1}\n field_name: {2}\n field_value: {3}\nwith the following endpoints:\n''' .\n format(problem, relation_type, relation_field_name,\n relation_value)\n )\n else:\n msg = (\n'''Warning: {0} the {1} relation with\nthe following endpoints:\n''' .\n format(problem, relation_type)\n )\n msg += (\n''' node1_type: {0}\n node1_id_type: {1}\n node1_value: {2}\n node2_type: {3}\n node2_id_type: {4}\n node2_value: {5}\nSkipping delete.''' .\n format(*map(nori.pps, [node1_type, node1_id_type,\n node1_value, node2_type,\n node2_id_type, node2_value]))\n )\n nori.core.email_logger.error(msg)\n return None\n if not ret:\n return True # assume it's all been deleted already\n relation_id = ret[0][0]\n relation_rev = ret[0][1]\n\n # get the field list\n flist = get_drupal_field_list(db_obj, db_cur, 'relation', relation_type)\n if flist is None:\n # won't be reached currently; script will exit on errors\n return None\n\n # prepare for a transaction\n db_ac = db_obj.autocommit(None)\n db_obj.autocommit(False)\n\n # remove the fields\n for field_name in flist:\n ret = delete_drupal_field(db_obj, db_cur, 'relation', relation_type,\n relation_id, relation_rev,\n (('field', field_name), 'unknown'),\n no_trans=True)\n if not ret:\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return None\n\n # remove the data and revision rows for the endpoints\n for table_infix in ['data', 'revision']:\n # query string and arguments\n if nori.core.cfg['delayed_drupal_deletes']:\n query_str = (\n'''\nUPDATE field_{0}_endpoints\nSET deleted = 1\nWHERE entity_type = 'relation'\nAND bundle = %s\nAND entity_id = %s\nAND revision_id = %s\n''' .\n format(table_infix)\n )\n else:\n query_str = (\n'''\nDELETE FROM field_{0}_endpoints\nWHERE entity_type = 'relation'\nAND bundle = %s\nAND deleted = 0\nAND entity_id = %s\nAND revision_id = %s\n''' .\n format(table_infix)\n )\n query_args = [relation_type, relation_id, relation_rev]\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=False):\n # won't be reached currently; script will exit on errors\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return None\n\n # remove the data and revision rows for the relation\n for table_suffix in ['', '_revision']:\n query_str = (\n'''\nDELETE FROM relation{0}\nWHERE relation_type = %s\nAND rid = %s\nAND vid = %s\n''' .\n format(table_suffix)\n )\n query_args = [relation_type, relation_id, relation_rev]\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=False):\n # won't be reached currently; script will exit on errors\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return None\n\n # finish the transaction\n ret = db_obj.commit()\n db_obj.autocommit(db_ac)\n if not ret:\n return None\n\n return True",
"def _replace_relations(obj, ci, side, field, other_ct, relation_type):\n used_relations = set()\n if getattr(obj, field):\n try:\n other = cdb.CI.objects.get(\n content_type=other_ct,\n object_id=getattr(obj, field).id,\n )\n kwargs = {'relation_type': relation_type}\n if side == 'child':\n kwargs['child'] = ci\n kwargs['parent'] = other\n else:\n kwargs['parent'] = ci\n kwargs['child'] = other\n used_relations.add(_create_or_update_relation(**kwargs).id)\n except cdb.CI.DoesNotExist:\n pass\n kwargs = {'type': relation_type}\n if side == 'child':\n kwargs['child'] = ci\n kwargs['parent__content_type'] = other_ct\n else:\n kwargs['parent'] = ci\n kwargs['child__content_type'] = other_ct\n cdb.CIRelation.objects.filter(**kwargs).exclude(\n id__in=used_relations\n ).delete()",
"def test_related_remove_language(app, testdata):\n doc1 = Document.get_record_by_pid(testdata[\"documents\"][0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(testdata[\"documents\"][1][\"document_pid\"])\n\n assert len(doc1.related.languages) == 0\n\n doc1.related.add_language(doc2)\n assert len(doc1.related.languages) == 1\n\n doc1.related.remove_language(doc2)\n assert len(doc1.related.languages) == 0",
"def test_remove(self):\n pass",
"def test_search_by_deleted_relation(self):\n db.session.delete(self.rel)\n db.session.commit()\n self.assertSlugs(\"comment\", self.comment.description, [])",
"def removeCorrelator(corrType) :\n try :\n s.removeCorrelator(corrType)\n except Exception, ex:\n print(ex.errorMsg)",
"def delete_resource_relation_by_user(self, *,\n id: str,\n user_id: str,\n relation_type: UserResourceRel,\n resource_type: ResourceType) -> None:\n if resource_type not in resource_relation_model:\n raise NotImplementedError(f'The resource_type {resource_type.name} is not define!')\n\n if relation_type not in resource_relation_model[resource_type]:\n raise NotImplementedError(f'the relation type {relation_type} is not defined!')\n\n res_rel_model = resource_relation_model[resource_type][relation_type]\n res_key = f'{resource_type.name.lower()}_rk'\n user_attr = getattr(res_rel_model, 'user_rk')\n res_attr = getattr(res_rel_model, res_key)\n try:\n with self.client.create_session() as session:\n session.query(res_rel_model).filter(user_attr == user_id, res_attr == id).delete()\n session.commit()\n except Exception as e:\n LOGGER.exception(f'Failed to delete relation between user {user_id} and resource {id}')\n raise e",
"def delete_relation(wn, source, target, change_list=None):\n delete_rel(source, target, change_list)\n delete_rel(target, source, change_list)",
"def remove_type(self, ):\n if self.AttributeNames.TYPE in self.attrs:\n del self.attrs[self.AttributeNames.TYPE]\n return self",
"def filter_UI_del_type_rm(account):\n\t_type = read_type()\n\tfiltered = delete_transaction_type(account, _type)\n\tif (not filtered):\n\t\tprint('Nu s-a efectuat nici o filtrare.')\n\telse:\n\t\tprint('Filtrare finalizata.')",
"def test_returns_correct_relation(self):\n self.assertEqual(type(self.Test.relation()).__name__, 'Relation')\n self.assertEqual(self.Test.relation().klass, self.Test)",
"def remove_edge(self, rtype, node1, node2):\n self.nodes[node1].remove_relation(rtype,node2)\n self.nodes[node2].remove_predecessor(rtype,node1)\n self.dirty = True",
"def remove_reference(type):\n nake_type = remove_alias(type)\n if not is_reference(nake_type):\n return type\n else:\n return nake_type.base",
"def test_remove_one(self):\n pass",
"def test_remove():\n # remove dict keys\n schema = Schema({\"weight\": int,\n Remove(\"color\"): str,\n Remove(\"amount\"): int})\n out_ = schema({\"weight\": 10, \"color\": \"red\", \"amount\": 1})\n assert \"color\" not in out_ and \"amount\" not in out_\n\n # remove keys by type\n schema = Schema({\"weight\": float,\n \"amount\": int,\n # remvove str keys with int values\n Remove(str): int,\n # keep str keys with str values\n str: str})\n out_ = schema({\"weight\": 73.4,\n \"condition\": \"new\",\n \"amount\": 5,\n \"left\": 2})\n # amount should stay since it's defined\n # other string keys with int values will be removed\n assert \"amount\" in out_ and \"left\" not in out_\n # string keys with string values will stay\n assert \"condition\" in out_\n\n # remove value from list\n schema = Schema([Remove(1), int])\n out_ = schema([1, 2, 3, 4, 1, 5, 6, 1, 1, 1])\n assert_equal(out_, [2, 3, 4, 5, 6])\n\n # remove values from list by type\n schema = Schema([1.0, Remove(float), int])\n out_ = schema([1, 2, 1.0, 2.0, 3.0, 4])\n assert_equal(out_, [1, 2, 1.0, 4])",
"def delete_sense_relation(wn, source, target, change_list=None):\n delete_sense_rel(wn, source, target, change_list)\n delete_sense_rel(wn, target, source, change_list)",
"def removeTmpRelations(self):\n for rel in Item.RELS:\n delattr(self, 'tmp_{}'.format(rel))",
"def unsetType(self):\n return _libsbml.Association_unsetType(self)",
"def remove_type(self, name):\n del self.types[name]"
]
| [
"0.9083394",
"0.7434875",
"0.74198115",
"0.7399699",
"0.73606783",
"0.70979846",
"0.6849677",
"0.682214",
"0.64918333",
"0.6335386",
"0.60610294",
"0.6020467",
"0.6011264",
"0.60044837",
"0.59442806",
"0.59359443",
"0.5918776",
"0.59070355",
"0.588445",
"0.58156455",
"0.5783452",
"0.5731745",
"0.5712164",
"0.5655617",
"0.5648517",
"0.56449866",
"0.56225246",
"0.5621422",
"0.5605401",
"0.5589773"
]
| 0.94640315 | 0 |
Test case for remove_relation_types | def test_remove_relation_types(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_remove_relation_type(self):\n pass",
"def test_change_relation_types(self):\n pass",
"def test_change_relation_type(self):\n pass",
"def test_add_relation_types(self):\n pass",
"def test_relation_before_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', -50021)['type'] == 'park'",
"def test_add_relation_type(self):\n pass",
"def test_relation_after_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', 50021) == None\n assert query_row(db_conf, 'osm_landusages', -50021) == None",
"def test_find_relation_types(self):\n pass",
"def test_get_relation_type(self):\n pass",
"def _replace_relations(obj, ci, side, field, other_ct, relation_type):\n used_relations = set()\n if getattr(obj, field):\n try:\n other = cdb.CI.objects.get(\n content_type=other_ct,\n object_id=getattr(obj, field).id,\n )\n kwargs = {'relation_type': relation_type}\n if side == 'child':\n kwargs['child'] = ci\n kwargs['parent'] = other\n else:\n kwargs['parent'] = ci\n kwargs['child'] = other\n used_relations.add(_create_or_update_relation(**kwargs).id)\n except cdb.CI.DoesNotExist:\n pass\n kwargs = {'type': relation_type}\n if side == 'child':\n kwargs['child'] = ci\n kwargs['parent__content_type'] = other_ct\n else:\n kwargs['parent'] = ci\n kwargs['child__content_type'] = other_ct\n cdb.CIRelation.objects.filter(**kwargs).exclude(\n id__in=used_relations\n ).delete()",
"def test_ticket_type_remove_error_bad_type(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type remove bad_type')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test__removeRelObject(t):\n t.adm._removeRelObject(\"device\", \"objmap\", \"relname\")",
"def test_remove():\n # remove dict keys\n schema = Schema({\"weight\": int,\n Remove(\"color\"): str,\n Remove(\"amount\"): int})\n out_ = schema({\"weight\": 10, \"color\": \"red\", \"amount\": 1})\n assert \"color\" not in out_ and \"amount\" not in out_\n\n # remove keys by type\n schema = Schema({\"weight\": float,\n \"amount\": int,\n # remvove str keys with int values\n Remove(str): int,\n # keep str keys with str values\n str: str})\n out_ = schema({\"weight\": 73.4,\n \"condition\": \"new\",\n \"amount\": 5,\n \"left\": 2})\n # amount should stay since it's defined\n # other string keys with int values will be removed\n assert \"amount\" in out_ and \"left\" not in out_\n # string keys with string values will stay\n assert \"condition\" in out_\n\n # remove value from list\n schema = Schema([Remove(1), int])\n out_ = schema([1, 2, 3, 4, 1, 5, 6, 1, 1, 1])\n assert_equal(out_, [2, 3, 4, 5, 6])\n\n # remove values from list by type\n schema = Schema([1.0, Remove(float), int])\n out_ = schema([1, 2, 1.0, 2.0, 3.0, 4])\n assert_equal(out_, [1, 2, 1.0, 4])",
"def test_related_remove_language(app, testdata):\n doc1 = Document.get_record_by_pid(testdata[\"documents\"][0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(testdata[\"documents\"][1][\"document_pid\"])\n\n assert len(doc1.related.languages) == 0\n\n doc1.related.add_language(doc2)\n assert len(doc1.related.languages) == 1\n\n doc1.related.remove_language(doc2)\n assert len(doc1.related.languages) == 0",
"def delete_drupal_relation(db_obj, db_cur, e1_entity_type, e1_entity_id,\n relation_cv, e2_entity_type, e2_entity_id):\n\n # relation details\n relation_ident = relation_cv[0]\n relation_type = relation_ident[1]\n if len(relation_ident) > 2:\n relation_field_name = relation_ident[2]\n relation_value_type = relation_cv[1]\n relation_value = relation_cv[2]\n\n # get the relation's IDs\n ret = get_drupal_relation_ids(db_obj, db_cur, e1_entity_type,\n e1_entity_id, relation_cv, e2_entity_type,\n e2_entity_id)\n if ret is None or (len(ret) > 1):\n if ret is None:\n problem = 'could not get the IDs of'\n else:\n problem = 'multiple entries found for'\n if len(relation_ident) > 2:\n msg = (\n'''Warning: {0} the following relation:\n type: {1}\n field_name: {2}\n field_value: {3}\nwith the following endpoints:\n''' .\n format(problem, relation_type, relation_field_name,\n relation_value)\n )\n else:\n msg = (\n'''Warning: {0} the {1} relation with\nthe following endpoints:\n''' .\n format(problem, relation_type)\n )\n msg += (\n''' node1_type: {0}\n node1_id_type: {1}\n node1_value: {2}\n node2_type: {3}\n node2_id_type: {4}\n node2_value: {5}\nSkipping delete.''' .\n format(*map(nori.pps, [node1_type, node1_id_type,\n node1_value, node2_type,\n node2_id_type, node2_value]))\n )\n nori.core.email_logger.error(msg)\n return None\n if not ret:\n return True # assume it's all been deleted already\n relation_id = ret[0][0]\n relation_rev = ret[0][1]\n\n # get the field list\n flist = get_drupal_field_list(db_obj, db_cur, 'relation', relation_type)\n if flist is None:\n # won't be reached currently; script will exit on errors\n return None\n\n # prepare for a transaction\n db_ac = db_obj.autocommit(None)\n db_obj.autocommit(False)\n\n # remove the fields\n for field_name in flist:\n ret = delete_drupal_field(db_obj, db_cur, 'relation', relation_type,\n relation_id, relation_rev,\n (('field', field_name), 'unknown'),\n no_trans=True)\n if not ret:\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return None\n\n # remove the data and revision rows for the endpoints\n for table_infix in ['data', 'revision']:\n # query string and arguments\n if nori.core.cfg['delayed_drupal_deletes']:\n query_str = (\n'''\nUPDATE field_{0}_endpoints\nSET deleted = 1\nWHERE entity_type = 'relation'\nAND bundle = %s\nAND entity_id = %s\nAND revision_id = %s\n''' .\n format(table_infix)\n )\n else:\n query_str = (\n'''\nDELETE FROM field_{0}_endpoints\nWHERE entity_type = 'relation'\nAND bundle = %s\nAND deleted = 0\nAND entity_id = %s\nAND revision_id = %s\n''' .\n format(table_infix)\n )\n query_args = [relation_type, relation_id, relation_rev]\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=False):\n # won't be reached currently; script will exit on errors\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return None\n\n # remove the data and revision rows for the relation\n for table_suffix in ['', '_revision']:\n query_str = (\n'''\nDELETE FROM relation{0}\nWHERE relation_type = %s\nAND rid = %s\nAND vid = %s\n''' .\n format(table_suffix)\n )\n query_args = [relation_type, relation_id, relation_rev]\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=False):\n # won't be reached currently; script will exit on errors\n db_obj.rollback() # ignore errors\n db_obj.autocommit(db_ac)\n return None\n\n # finish the transaction\n ret = db_obj.commit()\n db_obj.autocommit(db_ac)\n if not ret:\n return None\n\n return True",
"def schemaCleanupTypes():\n libxml2mod.xmlSchemaCleanupTypes()",
"def test_remove(self):\n pass",
"def removeTmpRelations(self):\n for rel in Item.RELS:\n delattr(self, 'tmp_{}'.format(rel))",
"def test_search_by_deleted_relation(self):\n db.session.delete(self.rel)\n db.session.commit()\n self.assertSlugs(\"comment\", self.comment.description, [])",
"def filter_UI_del_type_rm(account):\n\t_type = read_type()\n\tfiltered = delete_transaction_type(account, _type)\n\tif (not filtered):\n\t\tprint('Nu s-a efectuat nici o filtrare.')\n\telse:\n\t\tprint('Filtrare finalizata.')",
"def removeCorrelator(corrType) :\n try :\n s.removeCorrelator(corrType)\n except Exception, ex:\n print(ex.errorMsg)",
"def test_reset_polymorphic_ctype(self):\n Model2A.objects.create(field1='A1')\n Model2D.objects.create(field1='A1', field2='B2', field3='C3', field4='D4')\n Model2B.objects.create(field1='A1', field2='B2')\n Model2B.objects.create(field1='A1', field2='B2')\n Model2A.objects.all().update(polymorphic_ctype_id=None)\n\n with self.assertRaises(PolymorphicTypeUndefined):\n list(Model2A.objects.all())\n\n reset_polymorphic_ctype(Model2D, Model2B, Model2D, Model2A, Model2C)\n\n self.assertQuerysetEqual(\n Model2A.objects.order_by(\"pk\"),\n [\n Model2A,\n Model2D,\n Model2B,\n Model2B,\n ],\n transform=lambda o: o.__class__,\n )",
"def clean_object(metadata, analysistype):\n for sample in metadata:\n try:\n delattr(sample[analysistype], \"targetnames\")\n except AttributeError:\n pass\n try:\n delattr(sample[analysistype], \"targets\")\n except AttributeError:\n pass\n try:\n delattr(sample[analysistype], \"dnaseq\")\n except AttributeError:\n pass\n try:\n delattr(sample[analysistype], \"protseq\")\n except AttributeError:\n pass",
"def delete_relation(wn, source, target, change_list=None):\n delete_rel(source, target, change_list)\n delete_rel(target, source, change_list)",
"def del_objects_by_type(self, type_):\n [o.del_object_from_parent() for o in self.get_objects_by_type(type_)]",
"def remove_type(self, ):\n if self.AttributeNames.TYPE in self.attrs:\n del self.attrs[self.AttributeNames.TYPE]\n return self",
"def remove_relations(data=None, **kwargs):\n if request.method == 'OPTIONS':\n return\n keys_to_delete = []\n for key, val in data.iteritems():\n if type(data[key]) is list:\n keys_to_delete.append(key)\n\n for key in keys_to_delete:\n del data[key]",
"def test_001(self):\n\n class A(Model):\n pass\n\n self.assertIs(Model.models.get(\"A\"), A)\n\n Model.models.remove(A)",
"def remove_card_relations(event):\n resource = event.resource\n wall = find_interface(resource, IWall)\n for relation_id in wall.relations_map.find_relations(resource.rid):\n del wall.relations_map[relation_id]",
"def test_delete_collections(self):\n pass"
]
| [
"0.93458295",
"0.77008176",
"0.73982656",
"0.7369096",
"0.722008",
"0.7153807",
"0.71069854",
"0.7079776",
"0.6732576",
"0.6067846",
"0.60438186",
"0.60327435",
"0.5947649",
"0.59213793",
"0.5895194",
"0.5893429",
"0.5832505",
"0.5761203",
"0.5755601",
"0.5693539",
"0.56522644",
"0.56050444",
"0.5588274",
"0.5564242",
"0.555081",
"0.55255324",
"0.54913414",
"0.54863644",
"0.5446223",
"0.5437011"
]
| 0.946908 | 0 |
After the applications have been sent to Ahjo, the handlers should not be able to modify the applications. If the batch is returned without decision (as might theoretically happen), then the handlers may need to make changes again. | def applications_can_be_modified(self):
return self.status in [
ApplicationBatchStatus.DRAFT,
ApplicationBatchStatus.RETURNED,
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __update_application(self, apps, **extra_args):\n update_on_error = extra_args.get('update_on_error', False)\n # auto_enable_auth = extra_args.get(\n # 'auto_enable_auth', self.auto_enable_auth)\n\n for app in apps:\n state = app.execution.state\n old_state = state\n gc3libs.log.debug(\n \"About to update state of application: %s (currently: %s)\",\n app,\n state)\n try:\n if state not in [\n Run.State.NEW,\n Run.State.TERMINATING,\n Run.State.TERMINATED,\n ]:\n lrms = self.get_backend(app.execution.resource_name)\n try:\n state = lrms.update_job_state(app)\n # pylint: disable=broad-except\n except Exception as ex:\n gc3libs.log.debug(\n \"Error getting status of application '%s': %s: %s\",\n app, ex.__class__.__name__, ex, exc_info=True)\n state = Run.State.UNKNOWN\n # run error handler if defined\n ex = app.update_job_state_error(ex)\n if isinstance(ex, Exception):\n raise ex\n if state != old_state:\n app.changed = True\n # set log information accordingly\n if (app.execution.state == Run.State.TERMINATING\n and app.execution.returncode is not None\n and app.execution.returncode != 0):\n # there was some error, try to explain\n app.execution.info = (\n \"Execution failed on resource: %s\" %\n app.execution.resource_name)\n signal = app.execution.signal\n if signal in Run.Signals:\n app.execution.info = (\n \"Abnormal termination: %s\" % signal)\n else:\n if os.WIFSIGNALED(app.execution.returncode):\n app.execution.info = (\n \"Remote job terminated by signal %d\" %\n signal)\n else:\n app.execution.info = (\n \"Remote job exited with code %d\" %\n app.execution.exitcode)\n\n if state != Run.State.UNKNOWN or update_on_error:\n app.execution.state = state\n\n except (gc3libs.exceptions.InvalidArgument,\n gc3libs.exceptions.ConfigurationError,\n gc3libs.exceptions.UnrecoverableAuthError,\n gc3libs.exceptions.FatalError):\n # Unrecoverable; no sense in continuing --\n # pass immediately on to client code and let\n # it handle this...\n raise\n\n except gc3libs.exceptions.UnknownJob:\n # information about the job is lost, mark it as failed\n app.execution.returncode = (Run.Signals.Lost, -1)\n app.execution.state = Run.State.TERMINATED\n app.changed = True\n continue\n\n except gc3libs.exceptions.InvalidResourceName:\n # could be the corresponding LRMS has been removed\n # because of an unrecoverable error mark application\n # as state UNKNOWN\n gc3libs.log.warning(\n \"Cannot access computational resource '%s',\"\n \" marking task '%s' as UNKNOWN.\",\n app.execution.resource_name, app)\n app.execution.state = Run.State.TERMINATED\n app.changed = True\n continue\n\n # This catch-all clause is needed otherwise the loop stops\n # at the first erroneous iteration\n #\n # pylint: disable=broad-except\n except Exception as ex:\n if gc3libs.error_ignored(\n # context:\n # - module\n 'core',\n # - class\n 'Core',\n # - method\n 'update_job_state',\n # - actual error class\n ex.__class__.__name__,\n # - additional keywords\n 'update',\n ):\n gc3libs.log.warning(\n \"Ignored error in Core.update_job_state(): %s\", ex)\n # print again with traceback at a higher log level\n gc3libs.log.debug(\n \"(Original traceback follows.)\", exc_info=True)\n continue\n else:\n # propagate generic exceptions for debugging purposes\n raise",
"def retag_all_batches(apps, schema_editor):\n pass",
"def cleanup(self, batch: PayloadDictList, need_retry: bool) -> None:\n if not need_retry:\n return\n\n for event in batch:\n if not event in self.event_buffer:\n if not self.add_event(event):\n return",
"def run(self, batch):\n response = self.post(batch)\n log.info(\"< Discarding batch response\")\n response.close()",
"def update_batch(self, *args, **kwargs):\n pass",
"def handle_batch(self, batch: Mapping[str, Any]) -> None:\n self.batch = {**batch, **self.forward(batch)}",
"def __call__(self, graph):\n result = graph.sqs_message_dispatcher.handle_batch()\n if not result.message_count:\n raise SleepNow()",
"def on_batch_end(self, batch, logs=None):",
"def LoadApplications(self):\n\n self.__applicationList.clear()\n try:\n self.__api = ApplicationApi\n self.__apiexc = ApiException\n\n api_instance = self.__api(self.__engine.api_client)\n a = paginator(\n api_instance,\n \"get_all_applications\",\n _request_timeout=self.__engine.get_timeout())\n\n if a.response_list:\n for c in a.response_list:\n application = DxApplication(self.__engine)\n application.from_obj(c)\n if self.__engine.version_ge(\"6.0.0.0\") and c.application_id is not None:\n self.__applicationList[c.application_id] = application\n else:\n self.__applicationList[c.application_name] = application\n else:\n print_error(\"No applications found\")\n return 1\n\n except self.__apiexc as e:\n print_error(e.body)\n self.__logger.error(e.body)\n return 1",
"def test_block_missing_batch(self):\n pass",
"def export_applications(self):\n print('\\n=== Exporting all application data...')\n\n for application in self.client.applications:\n print('- Exporting application:', application.name)\n\n json = {\n 'id': self.get_id(application),\n 'href': application.href,\n 'name': application.name,\n 'description': application.description,\n 'status': application.status,\n 'createdAt': application.created_at.isoformat(),\n 'modifiedAt': application.modified_at.isoformat(),\n 'customData': self.get_custom_data(application),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n #'verificationEmails': [],\n }\n\n default_account_store_mapping = application.default_account_store_mapping\n default_group_store_mapping = application.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': application.default_account_store_mapping.href.split('/')[-1],\n 'href': application.default_account_store_mapping.href,\n 'type': application.default_account_store_mapping.account_store.__class__.__name__,\n 'name': application.default_account_store_mapping.account_store.name,\n 'list_index': application.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': application.default_group_store_mapping.href.split('/')[-1],\n 'href': application.default_group_store_mapping.href,\n 'type': application.default_group_store_mapping.account_store.__class__.__name__,\n 'name': application.default_group_store_mapping.account_store.name,\n 'list_index': application.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in application.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(application.tenant)\n self.write('%s/%s/applications/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')",
"def cleanup(batch: PayloadDictList, need_retry: bool) -> None:\n ...",
"def flush_batch(self, batch: Sequence[TResult]) -> None:\n pass",
"def should_handle_all_batches(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"should_handle_all_batches\")",
"def test_update_application_errors(self):\r\n with self.flask_app.app_context():\r\n\r\n self.register()\r\n self.new_application()\r\n\r\n res = self.update_application(new_name=\"\")\r\n assert \"This field is required\" in res.data\r\n\r\n res = self.update_application(new_short_name=\"\")\r\n assert \"This field is required\" in res.data\r\n\r\n res = self.update_application(new_description=\"\")\r\n assert \"You must provide a description.\" in res.data\r\n\r\n res = self.update_application(new_description=\"a\"*256)\r\n assert \"Field cannot be longer than 255 characters.\" in res.data\r\n\r\n res = self.update_application(new_long_description=\"\")\r\n assert \"This field is required\" not in res.data",
"def process_batch(sm_account_id, graph, interactions, batch_requests, p_session, processed_interactions=None,\n cutoff=None):\n with transaction.manager:\n for interaction in interactions:\n p_session.merge(interaction)\n\n if len(batch_requests) == 0 or (processed_interactions and processed_interactions >= cutoff):\n return\n\n # process batch requests\n # Number of max items in a batch request is 50\n MAX_BATCH_SIZE = 50\n batch_requests_p = [{'method': req.get('method'), 'relative_url': req.get('relative_url')} for req in\n batch_requests]\n batch_data = []\n\n interactions_new = set()\n batch_requests_new = []\n\n for i in range(math.ceil(len(batch_requests_p) / MAX_BATCH_SIZE)):\n # TODO handle connection error. attempt retries\n try:\n batch_req = json.dumps(batch_requests_p[i * MAX_BATCH_SIZE:(i * MAX_BATCH_SIZE) + (MAX_BATCH_SIZE - 1)],\n indent=1)\n batch_data += graph.request(\"\", post_args={\n 'batch': batch_req})\n\n except ConnectionError as e:\n logger.exception('unable to process batch request \\n:{}'.format(batch_req))\n for req, batch_response in zip(batch_requests, batch_data):\n parent_id = req.get('parent_id')\n if 'body' in batch_response:\n batch_response_data = json.loads(batch_response['body'])\n if 'error' in batch_response_data and batch_response_data['error'].get('code') == 1:\n # handle request failure - 'Please reduce the amount of data you are asking for, then retry your request'\n error_url = req.get('relative_url')\n parse_result = urlparse(error_url)\n query_data = urlparse.parse_qs(parse_result.query)\n old_limit = query_data.get('limit')[0]\n sm_account_id = parse_result.path.split(\"/\")[2]\n new_limit = int(float(old_limit) / 2)\n new_req = get_feed_request(sm_account_id, limit=new_limit)\n batch_requests_new.append(new_req)\n\n if 'data' in batch_response_data:\n for interaction_raw in batch_response_data['data']:\n Interactions.get_nested_interactions(sm_account_id, interaction_raw, interactions_new,\n batch_requests_new, parent_id)\n if 'paging' in batch_response_data and 'next' in batch_response_data['paging']:\n next_url = urlparse(batch_response_data['paging']['next'])\n relative_url = next_url.path + '?' + next_url.query + '&include_headers=false'\n req = {'method': 'GET', 'relative_url': relative_url, 'parent_id': parent_id}\n batch_requests_new.append(req)\n else:\n logger.info('Exception occurred while collecting posts for {} skipping this..'.format(sm_account_id))\n\n process_batch(sm_account_id, graph, interactions_new, batch_requests_new, p_session,\n processed_interactions + len(interactions), cutoff)",
"def on_message_batch(self, messages):\n assert isinstance(messages, list)\n assert len(messages) > 0\n assert all(isinstance(message, Message.Implementation) for message in messages)\n assert all(message.community == messages[0].community for message in messages)\n assert all(message.meta == messages[0].meta for message in messages)\n\n def _filter_fail(message):\n if isinstance(message, DelayMessage):\n if __debug__:\n dprint(message.delayed.candidate, \" delay \", message.delayed, \" (\", message, \")\")\n \n if message.create_request():\n self._statistics.delay_send += 1\n self._statistics.dict_inc(self._statistics.delay, \"om_message_batch:%s\" % message.delayed)\n self._statistics.delay_count += 1\n return False\n\n elif isinstance(message, DropMessage):\n if __debug__:\n dprint(message.dropped.candidate, \" drop: \", message.dropped.name, \" (\", message, \")\", level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"on_message_batch:%s\" % message)\n self._statistics.drop_count += 1\n return False\n\n else:\n return True\n\n meta = messages[0].meta\n\n if __debug__:\n debug_count = len(messages)\n debug_begin = time()\n\n # drop all duplicate or old messages\n assert type(meta.distribution) in self._check_distribution_batch_map\n messages = list(self._check_distribution_batch_map[type(meta.distribution)](messages))\n assert len(messages) > 0 # should return at least one item for each message\n assert all(isinstance(message, (Message.Implementation, DropMessage, DelayMessage)) for message in messages)\n\n # handle/remove DropMessage and DelayMessage instances\n messages = [message for message in messages if isinstance(message, Message.Implementation) or _filter_fail(message)]\n if not messages:\n return 0\n\n # check all remaining messages on the community side. may yield Message.Implementation,\n # DropMessage, and DelayMessage instances\n try:\n messages = list(meta.check_callback(messages))\n except:\n dprint(\"exception during check_callback for \", meta.name, exception=True, level=\"error\")\n return 0\n assert len(messages) >= 0 # may return zero messages\n assert all(isinstance(message, (Message.Implementation, DropMessage, DelayMessage)) for message in messages)\n\n if __debug__:\n if len(messages) == 0:\n dprint(meta.check_callback, \" yielded zero messages, drop, or delays. This is allowed but likely to be an error.\", level=\"warning\")\n\n # handle/remove DropMessage and DelayMessage instances\n messages = [message for message in messages if _filter_fail(message)]\n if not messages:\n return 0\n\n # store to disk and update locally\n if __debug__:\n dprint(\"in... \", len(messages), \" \", meta.name, \" messages from \", \", \".join(str(candidate) for candidate in set(message.candidate for message in messages)))\n \n if self.store_update_forward(messages, True, True, False):\n \n self._statistics.dict_inc(self._statistics.success, meta.name, len(messages))\n self._statistics.success_count += len(messages)\n\n # tell what happened\n if __debug__:\n debug_end = time()\n level = \"warning\" if (debug_end - debug_begin) > 1.0 else \"normal\"\n dprint(\"handled \", len(messages), \"/\", debug_count, \" %.2fs\" % (debug_end - debug_begin), \" \", meta.name, \" messages (with \", meta.batch.max_window, \"s cache window)\", level=level)\n \n # return the number of messages that were correctly handled (non delay, duplictes, etc)\n return len(messages)\n \n return 0",
"def on_predict_batch_end(\n self, batch: int, logs: tp.Optional[tp.Dict[str, np.ndarray]] = None\n ):\n pass",
"def sync_apps(self):\n pass",
"def no_transact_batch(self):\n return NoTransactionBatch(self._client)",
"def BeginExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def on_predict_batch_end(self, batch, logs=None):",
"def process(self, kb_info: dict, kb_meta: dict) -> dict:\n kb_info[\"app\"] = {\n \"title\": kb_meta[\"attributes\"][\"title\"],\n \"subtitle\": kb_meta[\"attributes\"][\"subtitle\"],\n \"version\": kb_meta[\"appCell\"][\"app\"][\"version\"],\n \"id\": kb_meta[\"appCell\"][\"app\"][\"id\"],\n \"tag\": kb_meta[\"appCell\"][\"app\"][\"tag\"],\n \"catalog_url\": kb_meta[\"attributes\"][\"info\"][\"url\"],\n }\n kb_info[\"params\"] = self._process_app_params(\n kb_meta[\"appCell\"][\"app\"][\"spec\"][\"parameters\"],\n kb_meta[\"appCell\"][\"params\"]\n )\n exec_state = kb_meta[\"appCell\"].get(\"exec\", {})\n exec_result = list()\n job_state = exec_state.get(\"jobState\", {})\n if \"result\" in job_state: # NJS (aka EE1)\n exec_result = job_state[\"result\"]\n elif \"job_output\" in job_state: # EE2\n exec_result = job_state[\"job_output\"].get(\"result\")\n\n ws_client = Workspace(self.ws_url, token=self.token)\n kb_info[\"output\"] = {\n \"widget\": exec_state.get(\"outputWidgetInfo\", {}),\n \"result\": exec_result,\n \"report\": build_report_view_data(self.host, ws_client, exec_result)\n }\n kb_info[\"job\"] = {\n \"state\": \"This app is new, and hasn't been started.\"\n }\n if \"exec\" in kb_meta[\"appCell\"]:\n kb_info[\"job\"][\"state\"] = self._get_job_state(kb_meta[\"appCell\"])\n return kb_info",
"def PatchApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def AppUpdateApp(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def ExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def set_apps(self, new_apps):\n self.remove_apps()\n for app_id in new_apps:\n self.add_app(Webapp.objects.get(pk=app_id))\n index_webapps.delay(new_apps)",
"def update(self, data):\n before = len(self)\n try:\n self._parse_response(data)\n except (IndexError, AttributeError):\n pass\n after = len(self)\n if before == after:\n LOGGER.debug(f'Could not extend AppList for Category \"{self.subcategory.parent.name()}'\n f' - {self.subcategory.name()}\"\\n'\n f'\\tMaxed out at {len(self.apps)} apps')\n raise Maximum()\n LOGGER.debug(f'Updated AppList for Category \"{self.subcategory.parent.name()} - {self.subcategory.name()}\"\\n'\n f'\\tNew number of apps is {len(self.apps)}')\n return self",
"def reject_appl(data, ind):\n global rejected\n global pending_sheet\n rejected.append_row(data)\n ind += 1\n pending_sheet.delete_rows(ind)\n print(colored('\\nApplication rejected.\\n', 'cyan', attrs=['bold']))",
"def process_state_batch(self, batch):\n return batch"
]
| [
"0.5827715",
"0.5719476",
"0.56643564",
"0.5472756",
"0.5467907",
"0.5407844",
"0.53933734",
"0.5391382",
"0.5365639",
"0.53107524",
"0.5281012",
"0.5252762",
"0.5197593",
"0.51716983",
"0.5165487",
"0.51599896",
"0.515447",
"0.5153859",
"0.51419616",
"0.5130705",
"0.51278067",
"0.5094973",
"0.50870866",
"0.5085272",
"0.5077923",
"0.5075951",
"0.50706404",
"0.50665164",
"0.5029229",
"0.50283396"
]
| 0.60474586 | 0 |
Sets the template_name of this UpdateSmtpTemplate. | def template_name(self, template_name):
self._template_name = template_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def template_id(self, template_id):\n\n self._template_id = template_id",
"def setTemplate(self, template):\n self.template = template",
"def set_template(self, name, value):\n\n self.templates[name] = value",
"def template_name(self, template_type: Union[TemplateType, str]) -> str:\n return self.options.get(\"templates\", {}).get(template_type, template_type)",
"def set_email_template_id(self, email_template_id):\n self.email_template_id = email_template_id",
"def template_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"template_name\")",
"def template(self, template):\n self._template = template",
"def template_name(self):\n\t\traise NotImplementedError('template_name must be defined')",
"def template_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"template_name\")",
"def template(self, template):\n\n self._template = template",
"def template(self, template):\n\n self._template = template",
"def vm_template_num(self, vm_template_num):\n\n self._vm_template_num = vm_template_num",
"def template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"template_name\")",
"def template_spec(self, template_spec):\n\n self._template_spec = template_spec",
"def template_id(self, template_id):\n if self.local_vars_configuration.client_side_validation and template_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `template_id`, must not be `None`\") # noqa: E501\n\n self._template_id = template_id",
"def save(self, *args, **kwargs):\n if self.pk is None:\n if not self.name.startswith(TEMPLATE_PREFIX):\n self.name = f'{TEMPLATE_PREFIX}{self.name}'\n super(Template, self).save(*args, **kwargs)",
"def topology_name(self, topology_name):\n\n self._topology_name = topology_name",
"def set_thread_name(self, thread_name: str):\n self.thread_name = thread_name",
"def multipart_template(self, multipart_template):\n\n self._multipart_template = multipart_template",
"def set_template(self, template, templateType, blogid=1):\n return self.execute(\"metaWeblog.setTemplate\", self.appkey, blogid, self.username, self.password, template, templateType)",
"def template(self, value: str):\n self._template = value",
"def task_name(self, task_name):\n\n self._task_name = task_name",
"def contact_name(self, contact_name):\n\n self._contact_name = contact_name",
"def contact_name(self, contact_name):\n\n self._contact_name = contact_name",
"def setTemplateParameter(self,name,value):\n self.tplparam[name] = value",
"def set_name(self, name):\n self.settings[\"name\"] = name",
"def get_edit_template_name(self, request=None):\n if not self.edit_template_name_ajax:\n return self.edit_template_name\n elif request and request.is_ajax():\n return self.edit_template_name_ajax\n else:\n return self.edit_template_name",
"def set_local_template(self, filename):\n extension = self._get_ext(filename)\n self._validate_extension(extension.upper(), self.ALLOWED_TEMPLATE_EXT)\n\n template = open(filename, 'rb').read().encode('base64')\n\n self.client.service.SetLocalTemplate(template=template, format=extension.upper())",
"def sender_name(self, sender_name):\n\n self._sender_name = sender_name",
"def set_name(self, _name):\n self.name = _name"
]
| [
"0.6428923",
"0.63492084",
"0.6289518",
"0.62629175",
"0.6210519",
"0.6139613",
"0.60831803",
"0.6069467",
"0.6065459",
"0.60353065",
"0.60353065",
"0.58650047",
"0.5752538",
"0.5721378",
"0.56142247",
"0.5564095",
"0.5535376",
"0.55189353",
"0.54267377",
"0.5419198",
"0.54136187",
"0.541289",
"0.54067755",
"0.54067755",
"0.5391117",
"0.5378291",
"0.5347929",
"0.5314622",
"0.5312137",
"0.5303102"
]
| 0.8125446 | 0 |
Sets the html_content of this UpdateSmtpTemplate. | def html_content(self, html_content):
self._html_content = html_content | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def html(self, html):\n\n self._html = html",
"def html(self, html: str):\n self._html = html",
"def update_html(self, plain_text):\n ENGINE.reset()\n self.html = ENGINE.convert(self.content)\n self.save()",
"def html_body(self, val: str):\n self._html_body = val",
"def set_html_body(self, body):\n self._html_body = body",
"def html(self, html):\n if html is None:\n raise ValueError(\"Invalid value for `html`, must not be `None`\")\n\n self._html = html",
"def raw_html(self, HTML):\n self._html = HTML",
"def set_content(self, content):\n self.content = content",
"def set_content(self, content):\n self.content = content",
"def set_content(self, content):\n self.data['content'] = content",
"def innerHTML(self, html: str) -> None:\n if self._inner_element:\n self._inner_element.innerHTML = html\n else:\n super().innerHTML = html # type: ignore",
"def setContent(self, content):\n self.__content = content",
"async def respondHTML(self, html):\n self.HTMLResponse = html",
"def content(self, content):\n\n self._content = content",
"def content(self, content):\n\n self._content = content",
"def body(self, value):\n self.set_property(\"body\", ItemBody(value, \"HTML\"))",
"def message_body_html(self):\n ...",
"def set_content(self, content):\n\n # pylint: disable=W0201\n self.clear()\n self.content = content",
"def render_html(self):\n return self.template.render(content=self.content, **self.styles)",
"def define_content(self, html):\n self.html_template(html, lang=\"en\")\n self.add_language(\"en\")",
"def content(self, content: str):\r\n self._content = content",
"def setHtmlValue(self, html): #$NON-NLS-1$\r",
"def html_url(self, html_url):\n\n self._html_url = html_url",
"def write_html(self, content):\n self.write(content)",
"def set_content(self, content, content_type=None):\r\n self.content = content\r\n if content_type:\r\n self.content_type = content_type",
"def update_content(self):\n raise NotImplementedError",
"def set_content(self, content):\n if check_data_exist(content) is True:\n self.content = content.text",
"def from_html(self, content):\r\n pass",
"def html(self, HTML):\n if not isinstance(HTML, str):\n raise TypeError\n self._html = HTML.decode(self.encoding, errors='xmlcharrefreplace')",
"def render_htmltext(self, htmltext, context):\r\n return CourseEmailTemplate._render(self.html_template, htmltext, context)"
]
| [
"0.6777866",
"0.6499594",
"0.63532406",
"0.6333052",
"0.6280877",
"0.6247843",
"0.6218343",
"0.6115204",
"0.6115204",
"0.6045669",
"0.59877867",
"0.59831834",
"0.59341025",
"0.58511275",
"0.58511275",
"0.5782569",
"0.5731653",
"0.5688748",
"0.56725824",
"0.5663702",
"0.5633562",
"0.55927104",
"0.55469596",
"0.55439067",
"0.55319643",
"0.5518654",
"0.5501138",
"0.54329616",
"0.53295076",
"0.53004205"
]
| 0.7471381 | 0 |
Sets the html_url of this UpdateSmtpTemplate. | def html_url(self, html_url):
self._html_url = html_url | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def html(self, html):\n\n self._html = html",
"def html(self, html):\n if html is None:\n raise ValueError(\"Invalid value for `html`, must not be `None`\")\n\n self._html = html",
"def html(self, html: str):\n self._html = html",
"def _set_url(self): \n self.url = self.geturl()",
"def set_url(self, url):\n self.data['url'] = url",
"def set_url(self, url):\n self.url = url",
"def set_url(self, url):\n if url is not None:\n self.url = url",
"def html_content(self, html_content):\n\n self._html_content = html_content",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def setUrl( self, url ):\n self._urlEdit.setText(str(url))",
"async def respondHTML(self, html):\n self.HTMLResponse = html",
"def set_url(self, url):\n self.url = url",
"def url(self, url: str):\n self._url = url",
"def url(self, url: str):\n\n self._url = url",
"def url(self, url: str):\n\n self._url = url",
"def url(self, url: str):\n\n self._url = url",
"def raw_html(self, HTML):\n self._html = HTML",
"def set_html_body(self, body):\n self._html_body = body",
"def set_error_page(self, html):\n return self.manager.set_error_page(self, html)",
"def webhook_url(self, webhook_url: \"str\"):\n self._attrs[\"webhookUrl\"] = webhook_url",
"def webhook_url(self, webhook_url: \"str\"):\n self._attrs[\"webhookUrl\"] = webhook_url",
"async def set_event_url(self, event_url: Optional[str]) -> None:\n if not event_url:\n event_url = self._server.url\n url = quote(str(event_url), safe=\"\")\n _LOGGER.info(\"Setting event update URL to %s\", url)\n await self._api_request(f\"postURL/{url}\")",
"def send_mail(self, html):\n message = Message(\n From=self._config['mail']['address'], To=self._config['mail']['to'],\n Subject=self._config['mail']['subject']\n )\n message.Html = html\n return self.sender.send(message)",
"def item_web_url(self, item_web_url):\n\n self._item_web_url = item_web_url"
]
| [
"0.62924886",
"0.61350495",
"0.61285585",
"0.5689741",
"0.56686664",
"0.5647783",
"0.55292785",
"0.54630464",
"0.5364144",
"0.5364144",
"0.5364144",
"0.5364144",
"0.5364144",
"0.5364144",
"0.5364144",
"0.535659",
"0.5326547",
"0.5304783",
"0.5257629",
"0.5238786",
"0.5238786",
"0.5238786",
"0.5216872",
"0.52096915",
"0.51738966",
"0.5034883",
"0.5034883",
"0.49920347",
"0.49899206",
"0.49889588"
]
| 0.73619145 | 0 |
Sets the reply_to of this UpdateSmtpTemplate. | def reply_to(self, reply_to):
self._reply_to = reply_to | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_reply_to(self, address):\n if not self.validate_email_address(address):\n raise Exception(\"Invalid email address '%s'\" % address)\n self._reply_to = address",
"def reply_to_email_address(self, val: EmailAddress):\n self._reply_to = val",
"def reply_to_email_address(self):\n return self._reply_to",
"def reply_to(self):\n return self.getattr('reply_to')",
"def reply_to_comment_guid(self, reply_to_comment_guid):\n\n self._reply_to_comment_guid = reply_to_comment_guid",
"def getReplyTo(self):\r\n return self.msg[\"Reply-To\"]",
"def reply_request(self, reply_request):\n if reply_request is None:\n raise ValueError(\"Invalid value for `reply_request`, must not be `None`\") # noqa: E501\n\n self._reply_request = reply_request",
"def reply(self, reply_id):\r\n return Reply(self, reply_id)",
"def test_send_mass_html_mail_reply_to(self, send_mass_html_mail__mock: Mock):\n self._send_form()\n\n self.assertEqual(send_mass_html_mail__mock.call_count, 1)\n self.assertEqual(send_mass_html_mail__mock.call_args[1]['reply_to'],\n [\"Marie <[email protected]>\"])",
"def test_send_mass_html_mail_reply_to(self, send_mass_html_mail__mock: Mock):\n self._send_form()\n\n self.assertEqual(send_mass_html_mail__mock.call_count, 1)\n self.assertEqual(send_mass_html_mail__mock.call_args[1]['reply_to'],\n [\"Marie <[email protected]>\"])",
"def reply_to(self):\n return self.receiver.remote_source.address",
"def get_reply_address(self):\n\t\trequest = self.context.get('request')\n\t\tif request and request.venue:\n\t\t\treturn request.venue.support_email_address\n\n\t\treturn self.get_default_reply_address",
"def reply(cls, user, context, message, reply_message):\n pass",
"def set_reply(msg):\n \n result = Message(msg.content, correlation_id=msg.correlation_id ) \n return result",
"def reply(cls, user, context, message, reply_message):\r\n pass",
"def auto_reply_message(self):\n if self._auto_reply is None:\n r = requests.get('https://outlook.office.com/api/v2.0/me/MailboxSettings/AutomaticRepliesSetting',\n headers=self._headers)\n check_response(r)\n self._auto_reply = r.json().get('InternalReplyMessage')\n\n return self._auto_reply",
"def reply(self, body):\n return self.author.send_message(body=body, reply_to=self.id)",
"def reply(self, message, destination=None, reply_to=None, notice=False):\n if destination is None:\n destination = self.default_destination\n\n if reply_to is None:\n reply_to = self._trigger.nick\n\n self._bot.reply(message, destination, reply_to, notice)",
"def test_send_mass_html_mail_reply_to(self, send_mass_html_mail__mock: Mock):\n events = Event.objects.filter(pk=self.event.pk)\n\n admin.EventAdmin.send_mail(Mock(), None, events)\n\n self.assertEqual(send_mass_html_mail__mock.call_count, 1)\n self.assertEqual(send_mass_html_mail__mock.call_args[1]['reply_to'],\n [\"Marie <[email protected]>\"])",
"def reply(self, irc, msg, args, user, id, text):\n try:\n note = self.db.get(id)\n except dbi.NoRecordError:\n irc.error('That\\'s not a note in my database.', Raise=True)\n if note.to != user.id:\n irc.error('You may only reply to notes '\n 'that have been sent to you.', Raise=True)\n self.db.setRead(id)\n text += ' (in reply to #%s)' % id\n public = irc.isChannel(msg.args[0])\n try:\n target = ircdb.users.getUser(note.frm)\n except KeyError:\n irc.error('The user who sent you that note '\n 'is no longer in my user database.', Raise=True)\n id = self.db.send(user.id, note.frm, public, text)\n irc.reply(format('Note #%i sent to %s.', id, target.name))",
"def get_reply_to():\n local,domain = get_base_submission_message_address().split('@')\n while True:\n rand = base64.urlsafe_b64encode(os.urandom(12))\n address = \"{}+{}@{}\".format(local,rand,domain)\n q = Message.objects.filter(reply_to=address)\n if not q:\n return address",
"def update_reply(name, title, reply_id):\n reply = reply_service.get_reply(reply_id)\n if reply:\n if reply.user_id != current_user.id:\n return redirect(url_for(\"post.post\", name=name, title=title))\n form = ReplyForm()\n if form.validate_on_submit():\n reply_service.update_reply(reply, form.reply.data)\n flash(\"Successfully updated reply.\", \"primary\")\n return redirect(url_for(\"post.post\", name=name, title=title))\n form.reply.data = reply.reply\n return render_template(\n \"update_reply.html\", name=name, title=title, reply_id=reply_id, form=form\n )\n else:\n abort(404)",
"def send_reply(self, username, msg_type, content, target, server):\n self.replyer.queue.put(\n message_parsing.Message(pseudo=username, msg_type=msg_type, content=content, target=target, server=server))",
"def set_receive_mail(self):\n self.__mail = True",
"def reply(self, text=None):\n self.message.click()\n self.message.send_keys(Keys.ARROW_RIGHT)\n try:\n self.message.find_element_by_xpath(\"//div[@aria-label='Reply']\").click()\n except NoSuchElementException:\n raise Exception(\"Message has been been deleted\")\n if text is not None:\n self.get_chat().send_message(text)",
"def is_reply(self):\n return (not self.is_forwarded and (\n bool(self.header('In-Reply-To'))\n or bool(re.match(RE_PATTERNS, self.header('Subject', '')))\n ))",
"def routed_to(self, routed_to):\n\n self._routed_to = routed_to",
"def set_auto_reply(self, message, status=AutoReplyStatus.ALWAYS_ENABLED, start=None, end=None,\n external_message=None, audience=AutoReplyAudience.ALL):\n # type: (str, OutlookAccount.AutoReplyStatus, datetime, datetime, str, OutlookAccount.AutoReplyAudience) -> None\n\n start_is_none = start is None\n end_is_none = end is None\n\n if (not start_is_none and end_is_none) or (start_is_none and not end_is_none):\n raise ValueError('Start and End not must both either be None or datetimes')\n\n start_is_datetime = isinstance(start, datetime)\n end_is_datetime = isinstance(end, datetime)\n\n if not start_is_datetime and not start_is_none or not end_is_datetime and not end_is_none:\n raise ValueError('Start and End must both either be None or datetimes')\n\n request_data = dict(Status=status, ExternalAudience=audience)\n\n # Outlook requires both an internal and external message. For convenience, pyOutlook allows only one message\n # and uses that as the external message if none is provided\n if external_message is None:\n external_message = message\n\n request_data.update(InternalReplyMessage=message, ExternalReplyMessage=external_message)\n\n if not start_is_none and not end_is_none:\n request_data.update(ScheduledStartDateTime=dict(DateTime=str(start)))\n request_data.update(ScheduledEndDateTime=dict(DateTime=str(end)))\n\n data = {\n \"@odata.context\": \"https://outlook.office.com/api/v2.0/$metadata#Me/MailboxSettings\",\n \"AutomaticRepliesSetting\": request_data\n }\n\n requests.patch('https://outlook.office.com/api/v2.0/me/MailboxSettings',\n headers=self._headers, data=json.dumps(data))\n\n self._auto_reply = message",
"def edit_reply(praw_comment, reply_msg):\n try:\n praw_comment.edit(reply_msg)\n except Exception as e:\n logger.exception('Exception while editing')\n return False\n\n logger.info(' => Edit was made!')\n return True",
"def reply(self, comment=None):\n return MessageReplyRequestBuilder(self.append_to_request_url(\"reply\"), self._client, comment=comment)"
]
| [
"0.7322558",
"0.7314207",
"0.6876749",
"0.66572744",
"0.59876484",
"0.5975135",
"0.5907021",
"0.56865406",
"0.5629698",
"0.5629698",
"0.5587288",
"0.5441423",
"0.5376826",
"0.5339525",
"0.52932507",
"0.5237598",
"0.5151917",
"0.5135997",
"0.5020687",
"0.5015476",
"0.50064665",
"0.5003214",
"0.49926406",
"0.4966871",
"0.49463102",
"0.48581678",
"0.485092",
"0.47951028",
"0.47825772",
"0.47733983"
]
| 0.7736451 | 0 |
Sets the attachment_url of this UpdateSmtpTemplate. | def attachment_url(self, attachment_url):
self._attachment_url = attachment_url | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def attachment_id(self, attachment_id):\n\n self._attachment_id = attachment_id",
"def avatar_url(self, avatar_url):\n\n self._avatar_url = avatar_url",
"def attachment_file_name(self, attachment_file_name):\n\n self._attachment_file_name = attachment_file_name",
"def attachment_file_name(self, attachment_file_name):\n\n self._attachment_file_name = attachment_file_name",
"def attachment_mime_type(self, attachment_mime_type):\n\n self._attachment_mime_type = attachment_mime_type",
"def url(self, image_url):\n\n self._url = image_url",
"def attachment_file_type(self, attachment_file_type):\n\n self._attachment_file_type = attachment_file_type",
"async def async_set_media_image_url(self, url):\n self._media_image_url = url",
"def attachment_file_id(self, attachment_file_id):\n\n self._attachment_file_id = attachment_file_id",
"def attachment_upload_id(self, attachment_upload_id):\n\n self._attachment_upload_id = attachment_upload_id",
"def attachments(self, attachments):\n\n self._attachments = attachments",
"def attachment_type(self, attachment_type):\n allowed_values = [\"Imported\", \"Linked\", \"URL\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and attachment_type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `attachment_type` ({0}), must be one of {1}\" # noqa: E501\n .format(attachment_type, allowed_values)\n )\n\n self._attachment_type = attachment_type",
"def notify_url(self, notify_url):\n\n self._notify_url = notify_url",
"def webhook_url(self, webhook_url: \"str\"):\n self._attrs[\"webhookUrl\"] = webhook_url",
"def webhook_url(self, webhook_url: \"str\"):\n self._attrs[\"webhookUrl\"] = webhook_url",
"def attachment_link(self, on, url=None, querystr=None, **kw):\n assert on in (0, 1, False, True) # make sure we get called the new way, not like the 1.5 api was\n _ = self.request.getText\n if querystr is None:\n querystr = {}\n assert isinstance(querystr, dict) # new in 1.6, only support dicts\n if 'do' not in querystr:\n querystr['do'] = 'view'\n if on:\n pagename, filename = AttachFile.absoluteName(url, self.page.page_name)\n #logging.debug(\"attachment_link: url %s pagename %s filename %s\" % (url, pagename, filename))\n fname = wikiutil.taintfilename(filename)\n if AttachFile.exists(self.request, pagename, fname):\n target = AttachFile.getAttachUrl(pagename, fname, self.request, do=querystr['do'])\n if not 'title' in kw:\n kw['title'] = \"attachment:%s\" % url\n kw['css'] = 'attachment'\n else:\n target = AttachFile.getAttachUrl(pagename, fname, self.request, do='upload_form')\n kw['title'] = _('Upload new attachment \"%(filename)s\"') % {'filename': fname}\n kw['css'] = 'attachment nonexistent'\n return self.url(on, target, **kw)\n else:\n return self.url(on)",
"def download_url(self, download_url):\n self._download_url = download_url",
"def download_url(self, download_url):\n\n self._download_url = download_url",
"def image_url(self, image_url):\n\n self._image_url = image_url",
"def setAddAttachments(self,value):\n self.PDFreactorConfiguration.in1[\"addAttachments\"] = value",
"def set_url(self, url):\n self.data['url'] = url",
"def image_url(self, image_url: str):\n\n self._image_url = image_url",
"def _set_url(self): \n self.url = self.geturl()",
"def set_url(self, url):\n if url is not None:\n self.url = url",
"def set_download_url(self, download_url):\n\n\t\tif download_url is not None and not isinstance(download_url, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: download_url EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__download_url = download_url\n\t\tself.__key_modified['download_url'] = 1",
"def thumbnail_url_if_set(self):\n return self.thumbnail.url if self.thumbnail else self.file.url",
"def recipient_email(self, recipient_email):\n\n self._recipient_email = recipient_email",
"def set_url(self, url):\n self.url = url",
"def multipart_template(self, multipart_template):\n\n self._multipart_template = multipart_template",
"def contact_email(self, contact_email):\n\n self._contact_email = contact_email"
]
| [
"0.57066125",
"0.56993",
"0.5668265",
"0.5668265",
"0.5528204",
"0.54329574",
"0.54173625",
"0.5387698",
"0.5307973",
"0.5294027",
"0.52891266",
"0.52693737",
"0.51740557",
"0.516391",
"0.516391",
"0.5150971",
"0.5148844",
"0.5120335",
"0.5108294",
"0.50362116",
"0.50353986",
"0.49962872",
"0.49790314",
"0.49720508",
"0.4971301",
"0.49489212",
"0.49279317",
"0.4899036",
"0.48942983",
"0.4883831"
]
| 0.7963267 | 0 |
Load an OpenVINO model for inference from a directory. | def _load(path, device=None, cache_dir=None, shapes=None):
status = KerasOpenVINOModel._load_status(path)
if status.get('xml_path', None):
xml_path = Path(status['xml_path'])
invalidInputError(xml_path.suffix == '.xml',
"Path of openvino model must be with '.xml' suffix.")
else:
invalidInputError(False, "nano_model_meta.yml must specify 'xml_path' for loading.")
xml_path = Path(path) / status['xml_path']
thread_num = None
config = status.get('config', {})
if "CPU_THREADS_NUM" in config:
thread_num = int(config["CPU_THREADS_NUM"])
elif "INFERENCE_NUM_THREADS" in config:
thread_num = int(config["INFERENCE_NUM_THREADS"])
if cache_dir is not None:
config["CACHE_DIR"] = cache_dir
if device is None:
device = status.get('device', 'CPU')
model = KerasOpenVINOModel(xml_path,
config=status['config'],
thread_num=thread_num,
device=device,
shapes=shapes)
with open(Path(path) / status['attr_path'], "rb") as f:
attrs = SafePickle.load(f)
for attr_name, attr_value in attrs.items():
setattr(model, attr_name, attr_value)
if os.path.exists(Path(path) / status['compile_path']):
with open(Path(path) / status['compile_path'], "rb") as f:
kwargs = SafePickle.load(f)
model.compile(**kwargs)
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load(path):\n status = KerasOpenVINOModel._load_status(path)\n if status.get('xml_path', None):\n xml_path = Path(status['xml_path'])\n invalidInputError(xml_path.suffix == '.xml',\n \"Path of openvino model must be with '.xml' suffix.\")\n else:\n invalidInputError(False, \"nano_model_meta.yml must specify 'xml_path' for loading.\")\n xml_path = Path(path) / status['xml_path']\n return KerasOpenVINOModel(xml_path)",
"def load_model(self, model_path: str):",
"def load_model(self, filename):\r\n pass",
"def load(path_to_model):\n pass",
"def load_model_file(device_index):\n print(\"\\nStart loading model...\")\n\n return kdp_wrapper.isi_load_nef(device_index, MODEL_FILE, ISI_APP_ID)",
"def load_model(model_path):\n nlp = spacy.blank('en') \n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner)\n #load pretrained model from the path\n ner = nlp.from_disk(model_path)\n return ner",
"def load_onnx(model_name):\n onnx_path = '%s.onnx' % model_name\n if not os.path.isfile(onnx_path):\n print('ERROR: file (%s) not found! You might want to run yolo_to_onnx.py first to generate it.' % onnx_path)\n return None\n else:\n with open(onnx_path, 'rb') as f:\n return f.read()",
"def load_model(self, path):\n pass",
"def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)",
"def load_model(self, model_path):\n\n model_path = os.path.join(model_path, os.listdir(model_path)[0])\n return rt.InferenceSession(model_path)",
"def load_model() -> None:\n global model\n\n if app.testing:\n current_dir = os.path.dirname(__file__)\n model_path = os.path.join(current_dir, \"models/model.pkl\")\n else:\n model_path = os.getenv(\"PATH_TO_MODEL\")\n\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n raise RuntimeError(err)\n\n with open(model_path, \"rb\") as model_file:\n model = pickle.load(model_file)",
"def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n self.model.load_weights(str(saved_path / 'model.vec'))",
"def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def load_model(self):\n try:\n self.model = Word2Vec.load(self.config[\"model_path\"])\n self.model.init_sims(replace=True)\n except Exception as e:\n print(e)\n print(\"error in model loading!\")",
"def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return",
"def load_model(self, sess, pb_model_path):\n\n logging.info(\"Import yolo model from pb start .......\")\n\n with sess.as_default():\n with sess.graph.as_default():\n with tf.gfile.FastGFile(pb_model_path, 'rb') as f_handle:\n logging.info(\"ParseFromString start .......\")\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f_handle.read())\n logging.info(\"ParseFromString end .......\")\n\n tf.import_graph_def(graph_def, name='')\n logging.info(\"Import_graph_def end .......\")\n\n logging.info(\"Import yolo model from pb end .......\")",
"def load_model(\n model_path=filepath + \"/trained_models/hi2en/\", model_file_name=\"model.h5\"\n):\n model_path = (\n filepath + \"/trained_models/{}/\".format(model_path)\n if model_path in [\"en2hi\", \"hi2en\"]\n else model_path\n )\n config = SConfig(configuration_file=model_path + \"config.pkl\")\n s2s = Seq2Seq(config)\n s2s.load_model(path_to_model=model_path, model_file_name=model_file_name)\n return s2s",
"def load_model():\n with open(paths.model('model.pkl'), 'rb') as stream:\n return pickle.load(stream)",
"def load_model(fn, model):\n if fn[-3] != \".tf\":\n fn += \".tf\"\n if model.saver is None:\n with model.graph.as_default():\n model.saver = tf.train.Saver()\n log(\"Loading model from {}\".format(fn))\n model.saver.restore(model.session, fn)\n log(\"Done loading!\")",
"def load_model(model):\n # Check if the model is a model directory (containing a metagraph and a checkpoint file)\n # or if it is a protobuf file with a frozen graph\n model_exp = os.path.expanduser(model)\n if os.path.isfile(model_exp):\n print('Model filename: %s' % model_exp)\n with tf.gfile.FastGFile(model_exp, 'rb') as f_l:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f_l.read())\n tf.import_graph_def(graph_def, name='')\n else:\n print('Model directory: %s' % model_exp)\n meta_file, ckpt_file = get_model_filenames(model_exp)\n\n print('Metagraph file: %s' % meta_file)\n print('Checkpoint file: %s' % ckpt_file)\n\n saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))\n saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))",
"def load_model():\n prepro = Prepro(PATH_STOPSWORD, PATH_ACRONYM)\n vectorizer = joblib.load(PATH_TFIDF)\n label_encoder = joblib.load(PATH_ENCODER)\n model_svm = joblib.load(PATH_SVM)\n model_nb = joblib.load(PATH_NB)\n model_lr = joblib.load(PATH_LR)\n return prepro, vectorizer, label_encoder, model_svm, model_nb, model_lr",
"def load_model():\r\n model = MobileNetV2(weights=\"imagenet\")\r\n print(\"Model loaded\")\r\n return model",
"def load_model(fname: os.PathLike) -> Model:\n return Model.load(fname)",
"def load_model():\n\n # find location of model\n\n file_path = '/Users/davidodwyer/Desktop' # to the directory\n file_name = 'original_mlr.joblib' \n the_file = os.path.join(file_path, file_name)\n\n # load model\n\n model = load(the_file)\n\n return model",
"def load_model(model, model_index, device=\"cpu\"):\n with open(\"trained_local_model\"+str(model_index), \"rb\") as f_:\n model.load_state_dict(torch.load(f_))\n model.to(device)\n return model",
"def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])",
"def load_model(filename):\n return Model.load_savefile(filename)",
"def load_model(self):\n pass",
"def load_model(self, file=None):\n return None",
"def load(self):\n print(\"==> Loading model from\", self.model_dir)\n self.model = tf.keras.models.load_model(self.model_dir)"
]
| [
"0.7958504",
"0.7205654",
"0.7140051",
"0.70383066",
"0.6961791",
"0.6906793",
"0.6895013",
"0.68585163",
"0.6748528",
"0.67198235",
"0.6706158",
"0.67056614",
"0.6662578",
"0.66614515",
"0.6654411",
"0.66251373",
"0.6618571",
"0.66169584",
"0.65785086",
"0.65668833",
"0.6556656",
"0.6549856",
"0.6537448",
"0.65336126",
"0.6528294",
"0.6497905",
"0.648323",
"0.6477443",
"0.64748216",
"0.6471997"
]
| 0.7251669 | 1 |
A VIEW token contract. | def token(chain: BaseChain) -> Contract:
return deploy_contract(chain, 'DSToken', args=['VIEW']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view(self) -> 'outputs.ViewDefinitionResponse':\n return pulumi.get(self, \"view\")",
"def auth_token(self):",
"def UserToken(self) -> object:",
"def getToken(self):\n \n raise NotImplementedError",
"def odb_token():\n return genToken()",
"def __call__(self, access_token):",
"def do_view(parser, token):\n\n args = []\n kwargs = {}\n tokens = token.split_contents()\n if len(tokens)<2:\n raise TemplateSyntaxError, (\"%r tag requires one or more arguments\" %\n token.contents.split()[0])\n tag_name = tokens.pop(0)\n url_or_view = tokens.pop(0)\n for token in tokens:\n equals = token.find(\"=\")\n if equals == -1:\n args.append(token)\n else:\n kwargs[str(token[:equals])] = token[equals+1:]\n return ViewNode(url_or_view, args, kwargs)",
"def __require_permission_view(self):\n permission = codechecker_api_shared.ttypes.Permission.PERMISSION_VIEW\n if not self.__has_permission(permission):\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"You are not authorized to execute this action.\")",
"def access_token(self):\n return self.access_token_str",
"async def token(request: Request):\n return get_token()",
"def get_reauth(request):\n client = PlaidAPI.client()\n ubank = request.user.userbank.first()\n if ubank is None:\n return HttpResponseRedirect(\"/setup_bank\")\n access_token = ubank.access_token\n get_pub = client.Item.public_token.create(access_token)\n public_token = get_pub[\"public_token\"]\n cntxt = {\"public_token\": public_token}\n return render(request, \"reauth.html\", cntxt)",
"def view(self) -> str:\n return pulumi.get(self, \"view\")",
"def __call__(self):\n return self.authenticated_token_row",
"async def solicit_token(url, scope):\n rc = RestClient(url, \"\")\n result = await rc.request(\"GET\", f\"/token?scope={scope}\")\n print(result[\"access\"])",
"def _get_view(self, cursor):\n raise NotImplementedError",
"def token(self):\n return self[\"token\"]",
"def view(self):\n raise NotImplementedError",
"def access_token(*args, **kwargs):\n return None",
"def grant_token(request):\n\n grant_token_svc = request.find_service(name=\"grant_token\")\n h_user = request.lti_user.h_user\n\n return {\"grant_token\": grant_token_svc.generate_token(h_user)}",
"def token(uncapped_token: Contract):\n return uncapped_token",
"async def token(request) -> ResponseText:\n return ResponseText(\n \"\".join(random.choices(string.ascii_uppercase + string.digits, k=42)) # noqa: S311\n )",
"def view(self):",
"def get_access_token():\n if request.method == \"GET\":\n return render_template(\"index.html\")\n elif request.method == \"POST\":\n # Authenticate\n auth = Authorization()\n response = auth.post()\n return render_template(\"index.html\", data=response[0])",
"def __str__(self):\n return self.token",
"def claim_token(self, allowed_failures=10):\n count = 0\n while count < allowed_failures:\n count += 1\n try:\n (key, ref) = self.client.get_token(self.view, \n view_params=self.view_params, window_size=100)\n document_index = ref\n if type(ref) == list:\n document_index = ref[0]\n record = self.client.db[document_index]\n modified_record = self.token_modifier.lock(record)\n return (key, ref, self.client.modify_token(modified_record) )\n except ResourceConflict:\n pass\n if count == allowed_failures:\n raise EnvironmentError(\"Unable to claim token.\")",
"def get_access_token(self, minutes: int = 1440) -> str:\n return crypt.encode_token({\n 'uuid': str(self.pk),\n 'space_id': str(self.space_id),\n }, timedelta(minutes=minutes))",
"def get_token(self):\r\n token = {'id': self.catalog['access']['token']['id'],\r\n 'expires': self.catalog['access']['token']['expires'], }\r\n try:\r\n token['user_id'] = self.catalog['access']['user']['id']\r\n token['tenant_id'] = (\r\n self.catalog['access']['token']['tenant']['id'])\r\n except Exception:\r\n # just leave the tenant and user out if it doesn't exist\r\n pass\r\n return token",
"def view(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"view\")",
"def token(self):\r\n return self._token",
"def mkview(self,\n context=[],\n viewobj=None):\n if viewobj == None:\n raise ValueError, \"mkview: viewobj is None\"\n return jsoncall.do_call(\"mkview\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context':context,\\\n 'viewobj':viewobj.__dict__},\n self.connection)"
]
| [
"0.5675292",
"0.564114",
"0.5602083",
"0.55689275",
"0.5393377",
"0.53466725",
"0.5294759",
"0.5293489",
"0.5281176",
"0.5270128",
"0.52660847",
"0.525514",
"0.5247107",
"0.52470833",
"0.52422994",
"0.5205387",
"0.5194465",
"0.51940876",
"0.51932335",
"0.5175761",
"0.51593035",
"0.5140495",
"0.5127761",
"0.5088469",
"0.50731266",
"0.5057853",
"0.5054957",
"0.5054748",
"0.5052179",
"0.5044124"
]
| 0.7534058 | 0 |
A blank ViewlySeedSale contract. | def sale(chain: BaseChain, token: Contract, beneficiary) -> Contract:
args = [token.address, beneficiary]
seed_sale = deploy_contract(chain, 'ViewlySeedSale', args=args)
token.transact().setOwner(seed_sale.address)
return seed_sale | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def running_sale(chain: BaseChain, token: Contract, sale) -> Contract:\n sale.transact().startSale(DURATION, BLOCK_OFFSET)\n chain.wait.for_block(sale.call().startBlock())\n return sale",
"def test_cannot_get_empty_sales(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'This sale does not exist!')\n self.assertEqual(resp.status_code, 400)",
"def test_17_transaction_create_sell_cash(self):\n portfolio = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n sell_cash_eur = Transaction.sell_cash(\n portfolio=portfolio,\n asset=\"EUR\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=100000,\n unit_price=1.17,\n user=user\n )\n\n self.assertTrue(isinstance(sell_cash_eur, Transaction),\n msg=\"Transaction is NOT returning a valid object while selling EUR in cash\")\n print(\"Transaction sell_cash method is returning a valid EUR transaction: {}\".format(\n sell_cash_eur))\n\n \"\"\"Is transaction avoiding short sell cash objects?\"\"\"\n short_sell_cash_eur = Transaction.sell_cash(\n portfolio=portfolio,\n asset=\"EUR\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=500000,\n unit_price=1.10,\n user=user\n )\n\n self.assertFalse(isinstance(short_sell_cash_eur, Transaction),\n msg=\"Transaction is NOT avoiding short selling EUR in cash\")\n print(\"Transaction sell_cash method is avoiding a short sell EUR transaction: {}\".format(\n short_sell_cash_eur))",
"def init(self, cr):\n\t\ttools.drop_view_if_exists(cr, 'purchase_order_line_summary')\n\n\t cr.execute(\"\"\" CREATE VIEW purchase_order_line_summary AS (\n\t SELECT max(id) as id,order_id,product_id,name,product_uom,sum(product_qty) as product_qty, \n\t\t\tsum(price_subtotal) as price_subtotal,avg(discount) as discount \n\t\t\tfrom purchase_order_line\n\t\t\tgroup by order_id,product_id,name,product_uom)\n\t\t\t\"\"\")",
"def __init__(self, start: datetime.date, balance: float) -> None:\n Contract.__init__(self, start)\n self.balance = balance * (-1)",
"def sale_call(data):\n print('-' * 80)\n print(\"\")\n print(\"This is the Sales review.\")\n items_tally = get_new_list(data, 4)\n total_sales = count_total_sales(items_tally)\n sales_vals = get_new_list(data, 5)\n values_tally = [int(num) for num in sales_vals]\n count_sales_value(values_tally, total_sales)\n exit_call = continue_exit(data)\n if exit_call:\n return True\n else:\n return False",
"def test_new_empty_invoice_address(self):\r\n self.original = self.env[\"res.partner\"].create({\r\n \"is_company\": False,\r\n \"type\": 'invoice',\r\n \"lastname\": \"\",\r\n \"firstname\": \"\"})",
"def test_sale_service(self):\n sale_order_vals = {\n 'partner_id': self.partner_usd.id,\n 'partner_invoice_id': self.partner_usd.id,\n 'partner_shipping_id': self.partner_usd.id,\n 'order_line': [(0, 0, {\n 'name': self.product_delivery_timesheet2.name,\n 'product_id': self.product_delivery_timesheet2.id,\n 'product_uom_qty': 50,\n 'product_uom': self.product_delivery_timesheet2.uom_id.id,\n 'price_unit': self.product_delivery_timesheet2.list_price\n }),\n ],\n 'pricelist_id': self.pricelist_usd.id,\n }\n sale_order = self.env['sale.order'].create(sale_order_vals)\n sale_order.order_line._compute_product_updatable()\n self.assertTrue(sale_order.order_line[0].product_updatable)\n sale_order.action_confirm()\n sale_order.order_line._compute_product_updatable()\n self.assertFalse(sale_order.order_line[0].product_updatable)\n self.assertEqual(sale_order.invoice_status, 'no', 'Sale Service: there should be nothing to invoice after validation')\n\n # check task creation\n project = self.project_global\n task = project.task_ids.filtered(lambda t: t.name == '%s:%s' % (sale_order.name, self.product_delivery_timesheet2.name))\n self.assertTrue(task, 'Sale Service: task is not created')\n self.assertEqual(task.partner_id, sale_order.partner_id, 'Sale Service: customer should be the same on task and on SO')\n # register timesheet on task\n self.env['account.analytic.line'].create({\n 'name': 'Test Line',\n 'project_id': project.id,\n 'task_id': task.id,\n 'unit_amount': 50,\n 'employee_id': self.employee_manager.id,\n })\n self.assertEqual(sale_order.invoice_status, 'to invoice', 'Sale Service: there should be sale_ordermething to invoice after registering timesheets')\n sale_order.action_invoice_create()\n line = sale_order.order_line\n self.assertTrue(line.product_uom_qty == line.qty_delivered == line.qty_invoiced, 'Sale Service: line should be invoiced completely')\n self.assertEqual(sale_order.invoice_status, 'invoiced', 'Sale Service: SO should be invoiced')\n self.assertEqual(sale_order.tasks_count, 1, \"A task should have been created on SO confirmation.\")\n\n # Add a line on the confirmed SO, and it should generate a new task directly\n product_service_task = self.env['product.product'].create({\n 'name': \"Delivered Service\",\n 'standard_price': 30,\n 'list_price': 90,\n 'type': 'service',\n 'invoice_policy': 'delivery',\n 'uom_id': self.env.ref('product.product_uom_hour').id,\n 'uom_po_id': self.env.ref('product.product_uom_hour').id,\n 'default_code': 'SERV-DELI',\n 'service_type': 'timesheet',\n 'service_tracking': 'task_global_project',\n 'project_id': project.id\n })\n\n self.env['sale.order.line'].create({\n 'name': product_service_task.name,\n 'product_id': product_service_task.id,\n 'product_uom_qty': 10,\n 'product_uom': product_service_task.uom_id.id,\n 'price_unit': product_service_task.list_price,\n 'order_id': sale_order.id,\n })\n\n self.assertEqual(sale_order.tasks_count, 2, \"Adding a new service line on a confirmer SO should create a new task.\")",
"def mock_deposit(obj, overwrite, **kwargs):\n return Preview(source_id=obj.source_id,\n checksum=obj.checksum,\n metadata=Metadata(added=added,\n checksum='foopdfchex==',\n size_bytes=1_234))",
"def test_sale_margin(self):\n self.pricelist.currency_id = self.env.company.currency_id\n self.product.standard_price = 700.0\n sale_order_so11 = self.SaleOrder.create({\n 'date_order': datetime.today(),\n 'name': 'Test_SO011',\n 'order_line': [\n (0, 0, {\n 'name': '[CARD] Individual Workplace',\n 'price_unit': 1000.0,\n 'product_uom': self.product_uom_id,\n 'product_uom_qty': 10.0,\n 'state': 'draft',\n 'product_id': self.product_id}),\n (0, 0, {\n 'name': 'Line without product_uom',\n 'price_unit': 1000.0,\n 'product_uom_qty': 10.0,\n 'state': 'draft',\n 'product_id': self.product_id})],\n 'partner_id': self.partner_id,\n 'partner_invoice_id': self.partner_invoice_address_id,\n 'partner_shipping_id': self.partner_invoice_address_id,\n 'pricelist_id': self.pricelist_id})\n # Confirm the sales order.\n sale_order_so11.action_confirm()\n # Verify that margin field gets bind with the value.\n self.assertEqual(sale_order_so11.margin, 6000.00, \"Sales order profit should be 6000.00\")\n self.assertEqual(sale_order_so11.margin_percent, 0.3, \"Sales order margin should be 30%\")\n sale_order_so11.order_line[1].purchase_price = 800\n self.assertEqual(sale_order_so11.margin, 5000.00, \"Sales order margin should be 5000.00\")",
"def _prepare_invoice(self):\n self.ensure_one()\n result = super(SaleOrder, self)._prepare_invoice()\n result.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return result",
"def test_inTransSalePC(self):\n # Start a transasction\n pos.click_speed_key(\"Generic Item\")\n \n # Void the item to an empty transaction\n # NOTE: Should uncomment this when related defect is fixed (likely in MERLIN-1335)\n #pos.click(\"Void item\")\n \n # Repeat earlier test\n self.test_basicSalePC()",
"def test_14_transaction_create_sell_bonds(self):\n portfolio = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n sell_bond_alitalia = Transaction.sell_bond(\n portfolio=portfolio,\n asset=\"ALITALIA\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=8,\n unit_price=118000.04,\n user=user\n )\n\n self.assertTrue(isinstance(sell_bond_alitalia, Transaction),\n msg=\"Transaction is NOT returning a valid object while selling an ALITALIA bond\")\n print(\"Transaction sell_bond method is returning a valid ALITALIA transaction: {}\".format(\n sell_bond_alitalia))\n\n \"\"\"Is transaction avoiding short sell bond objects?\"\"\"\n short_sell_bond_alitalia = Transaction.sell_bond(\n portfolio=portfolio,\n asset=\"ALITALIA\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=1,\n unit_price=121000,\n user=user\n )\n\n self.assertFalse(isinstance(short_sell_bond_alitalia, Transaction),\n msg=\"Transaction is NOT avoiding short selling an ALITALIA stock\")\n print(\"Transaction sell_stock method is avoiding a short sell ALITALIA transaction: {}\".format(\n short_sell_bond_alitalia))",
"def test_create_warranty(self):\n pass",
"def test_sale_margin1(self):\n sale_order_so12 = self.SaleOrder.create({\n 'date_order': datetime.today(),\n 'name': 'Test_SO012',\n 'order_line': [\n (0, 0, {\n 'name': '[CARD] Individual Workplace',\n 'purchase_price': 40.0,\n 'price_unit': 20.0,\n 'product_uom': self.product_uom_id,\n 'product_uom_qty': 1.0,\n 'state': 'draft',\n 'product_id': self.product_id}),\n (0, 0, {\n 'name': 'Line without product_uom',\n 'price_unit': -100.0,\n 'purchase_price': 0.0,\n 'product_uom_qty': 1.0,\n 'state': 'draft',\n 'product_id': self.product_id})],\n 'partner_id': self.partner_id,\n 'partner_invoice_id': self.partner_invoice_address_id,\n 'partner_shipping_id': self.partner_invoice_address_id,\n 'pricelist_id': self.pricelist_id})\n # Confirm the sales order.\n sale_order_so12.action_confirm()\n # Verify that margin field of Sale Order Lines gets bind with the value.\n self.assertEqual(sale_order_so12.order_line[0].margin, -20.00, \"Sales order profit should be -20.00\")\n self.assertEqual(sale_order_so12.order_line[0].margin_percent, -1, \"Sales order margin percentage should be -100%\")\n self.assertEqual(sale_order_so12.order_line[1].margin, -100.00, \"Sales order profit should be -100.00\")\n self.assertEqual(sale_order_so12.order_line[1].margin_percent, 1.00, \"Sales order margin should be 100% when the cost is zero and price defined\")\n # Verify that margin field gets bind with the value.\n self.assertEqual(sale_order_so12.margin, -120.00, \"Sales order margin should be -120.00\")\n self.assertEqual(sale_order_so12.margin_percent, 1.5, \"Sales order margin should be 150%\")",
"def test_total_payments_in_cero(self):\n sale = SaleFactory(total_value=100)\n self.assertEqual(sale.total_payments, 0)",
"def test_attendant_can_only_view_own_sale(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Benja Maisha',\n username='maisha',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n\n user = dict(\n username='maisha',\n password='Andela8'\n )\n response = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n reply = json.loads(response.data.decode())\n token = reply['token']\n\n resp = self.client.get(\n '/api/v1/sales/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'You have no access to this sale!')\n self.assertEqual(resp.status_code, 401)",
"def call_transfer_fund(self):\n ## 1) Create expense line for current student\n ## 2) Create Deposite lines for oney transfer student\n\n ## 1\n student_pool = self.env['op.student']\n partner_obj = self.env['res.partner']\n employee_pool = self.env['hr.employee']\n\n if not self.pin_varification:\n raise except_orm(_('Warning!'),\n _(\"Enter Valid PIN to proceed!\"))\n\n\n student_id = student_pool.search([('user_id', '=', self._uid)])\n\n ## Validate Enter PIN\n if student_id:\n self.validate_current_user_pin(student_id)\n\n expense_vals = {\n 'name': student_id.id,\n 'amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s\" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n 'create_invoice': False,\n # 'student_id': student_id.id,\n }\n\n student_expenses_id = self.env['student.expenses'].sudo().create(expense_vals)\n self.total_expense_balance = student_id.stud_balance_amount\n\n ## Get employee form account id\n employee_id = employee_pool.sudo().search([('ean13', '=', self.account_no)])\n\n ## Search EMployee By Employee ID\n search_by_id_employee_id = employee_pool.sudo().search([('identification_id', '=', self.account_no)])\n\n ## Search by student matrix ID\n search_by_id_student_id = student_pool.sudo().search([('gr_no', '=', self.account_no)])\n\n if not self.account_no:\n ## Logic for search by User Name\n employee_id = self.pass_employee_id.sudo()\n student_id = self.pass_student_id.sudo()\n else:\n ## Get partner form account id\n student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if student_id:\n deposite_vals = {\n 'name': student_id.id,\n # 'amount': self.amount_to_transfer,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n if not self.account_no:\n trans_student_id = student_id.sudo()\n else:\n trans_student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if trans_student_id:\n self.total_deposite_balance = trans_student_id.stud_balance_amount\n elif employee_id:\n deposite_vals = {\n 'name': employee_id.id,\n 'employee_id': employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = employee_id.available_balance\n\n elif search_by_id_employee_id:\n deposite_vals = {\n 'name': search_by_id_employee_id.id,\n 'employee_id': search_by_id_employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_employee_id.available_balance\n\n elif search_by_id_student_id:\n deposite_vals = {\n 'name': search_by_id_student_id.id,\n 'employee_id': search_by_id_student_id.gr_no,\n 'paid_amount': self.amount_transfer,\n 'date': 
datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_student_id.stud_balance_amount\n\n # return True\n compose_form = self.env.ref('deposite_management.transfer_confirmation_popup_view', False)\n\n try:\n template_id = self.env.ref('deposite_management.email_template_student_fund_transfer', False)\n except ValueError:\n template_id = False\n values = self.env['email.template'].generate_email(template_id.id, self.id)\n\n ## Append Student email id to send mail\n if values and 'email_to' in values:\n values['email_to'] = student_id.sudo().email\n mail_id = self.env['mail.mail'].sudo().create(values)\n if mail_id:\n mail_send_id = mail_id.send()\n\n try:\n template_id_new = self.env.ref('deposite_management.email_template_student_fund_transfer_self_notification', False)\n except ValueError:\n template_id_new = False\n values_new = self.env['email.template'].generate_email(template_id_new.id, self.id)\n ## Append email id to send mail\n if values_new and 'email_to' in values_new:\n if student_id and trans_student_id:\n values_new['email_to'] = trans_student_id.email\n elif employee_id:\n values_new['email_to'] = employee_id.sudo().work_email\n mail_id_new = self.env['mail.mail'].sudo().create(values_new)\n if mail_id_new:\n mail_send_id = mail_id_new.send()\n ## return wizard after click on Fund Transfer Button\n return {\n 'name': _('Fund Transfer Done'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'fund.confirmation.msg',\n 'view_id': compose_form.id,\n 'target': 'new',\n }",
"def test_total_invoices_in_cero(self):\n sale = SaleFactory(total_value=100)\n self.assertEqual(sale.total_invoices, 0)",
"def test_11_transaction_create_sell_stock(self):\n portfolio = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n sell_stock_aapl = Transaction.sell_stock(\n portfolio=portfolio,\n asset=\"AAPL\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=64,\n unit_price=163.04,\n user=user\n )\n\n sell_stock_msft = Transaction.sell_stock(\n portfolio=portfolio,\n asset=\"MSFT\",\n t_currency=TRANSACTION_CURRENCY_EUR,\n amount=32,\n unit_price=76.20,\n user=user\n )\n\n self.assertTrue(isinstance(sell_stock_aapl, Transaction),\n msg=\"Transaction is NOT returning a valid object while selling an AAPL stock\")\n print(\"Transaction sell_stock method is returning a valid AAPL transaction: {}\".format(\n sell_stock_aapl))\n\n self.assertTrue(isinstance(sell_stock_msft, Transaction),\n msg=\"Transaction is NOT returning a valid object while selling an MSFT stock\")\n print(\"Transaction sell_stock method is returning a valid MSFT transaction: {}\".format(\n sell_stock_msft))\n\n \"\"\"Is transaction avoiding short sell stock objects?\"\"\"\n short_sell_stock_msft = Transaction.sell_stock(\n portfolio=portfolio,\n asset=\"MSFT\",\n t_currency=TRANSACTION_CURRENCY_EUR,\n amount=33,\n unit_price=78.20,\n user=user\n )\n\n self.assertFalse(isinstance(short_sell_stock_msft, Transaction),\n msg=\"Transaction is NOT avoiding short selling an MSFT stock\")\n print(\"Transaction sell_stock method is avoiding a short sell MSFT transaction: {}\".format(\n short_sell_stock_msft))",
"def test_only_attendant_can_make_a_sale(self):\n resp = self.admin_add_product()\n reply = self.admin_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Unauthorized Access!')\n self.assertEqual(resp.status_code, 401)",
"def contract(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"contract\", public_id)",
"def test_pay_for_nothing(self):\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n data = {\n \"guest_email\": \"[email protected]\",\n \"basket\": reverse('basket-detail', args=[basket_id]),\n \"shipping_address\": {\n \"first_name\": \"fadsf\",\n \"last_name\": \"fad\",\n \"line1\": \"234 5th Ave\",\n \"line4\": \"Manhattan\",\n \"postcode\": \"10001\",\n \"state\": \"NY\",\n \"country\": reverse('country-detail', args=['US']),\n \"phone_number\": \"+1 (717) 467-1111\",\n }\n }\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)",
"def test_get_one_sale_record(self):\n\t\tself.register_user()\n\t\tresult = self.login_user()\n\t\taccess_token = json.loads(result.data.decode())['token']\n\n\t\tresponse = self.client.post('/api/v1/sales',\n\t\t\tdata=self.sales_data,\n\t\t\theaders=dict(Authorization=\"Bearer \" + access_token))\n\t\t\n\t\tself.assertEqual(response.status_code, 200)",
"def test_no_contracts(self):\n ProjectContract.objects.all().delete()\n response = self._get()\n self.assertEqual(response.status_code, 200)\n contracts = response.context['contracts']\n self.assertEqual(len(contracts), 0)",
"def test_create_contract_admin_page(self):\n # asserts that there aren't any properties in changelist view\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertNotIn('table', content)\n self.assertIn(\n '<a href=\"/admin/contracts/contract/add/\" class=\"addlink\">',\n content)\n\n # creates the contract\n payload = self.contract_one_data\n payload['tenant'] = payload['tenant'].id\n payload['property'] = payload['property'].id\n response = self.client.post(\n '/admin/contracts/contract/add/', payload, follow=True)\n self.assertEqual(response.status_code, 200)\n\n # checks it shows in listing\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertIn('table', content)\n self.assertIn(str(self.contract_one_data['rent']), content)",
"def test_sale_margin3(self):\n sale_order_so14 = self.SaleOrder.create({\n 'date_order': datetime.today(),\n 'name': 'Test_SO014',\n 'order_line': [\n (0, 0, {\n 'name': '[CARD] Individual Workplace',\n 'purchase_price': 50.0,\n 'price_unit': 100.0,\n 'product_uom': self.product_uom_id,\n 'product_uom_qty': 3.0,\n 'state': 'draft',\n 'product_id': self.product_id}),\n (0, 0, {\n 'name': 'Line without product_uom',\n 'price_unit': -50.0,\n 'purchase_price': 0.0,\n 'product_uom_qty': 1.0,\n 'state': 'draft',\n 'product_id': self.product_id})],\n 'partner_id': self.partner_id,\n 'partner_invoice_id': self.partner_invoice_address_id,\n 'partner_shipping_id': self.partner_invoice_address_id,\n 'pricelist_id': self.pricelist_id})\n # Confirm the sales order.\n sale_order_so14.action_confirm()\n # Verify that margin field of Sale Order Lines gets bind with the value.\n self.assertEqual(sale_order_so14.order_line[0].margin, 150.00, \"Sales order profit should be 150.00\")\n self.assertEqual(sale_order_so14.order_line[0].margin_percent, 0.5, \"Sales order margin should be 100%\")\n self.assertEqual(sale_order_so14.order_line[1].margin, -50.00, \"Sales order profit should be -50.00\")\n self.assertEqual(sale_order_so14.order_line[1].margin_percent, 1.0, \"Sales order margin should be 100%\")\n # Verify that margin field gets bind with the value.\n self.assertEqual(sale_order_so14.margin, 100.00, \"Sales order profit should be 100.00\")\n self.assertEqual(sale_order_so14.margin_percent, 0.4, \"Sales order margin should be 40%\")",
"def test_cannot_make_sale_with_missing_fields(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'One of the fields is empty!')\n self.assertEqual(resp.status_code, 400)",
"def test_make_default_ach_business(self):\n\n business = self.client.businesses.create({})\n\n FundingSources.get_business_ach_funding_source(business)\n source = FundingSources.get_business_ach_funding_source(business)\n\n default = self.client.funding_sources(source.token).make_default()\n\n verify_payment_card_response_model(\n self, default, {'is_default_account': True})",
"def test_show_cart_empty(client):\n raise NotImplemented('Acceptance test failed')"
]
| [
"0.53047764",
"0.5272284",
"0.5200053",
"0.5149378",
"0.5132936",
"0.512546",
"0.5105635",
"0.5097565",
"0.5085872",
"0.5041826",
"0.5026435",
"0.49941793",
"0.49609843",
"0.4953554",
"0.49455854",
"0.49263102",
"0.4923898",
"0.4923678",
"0.49153724",
"0.49101308",
"0.4887956",
"0.48785767",
"0.48725602",
"0.48491278",
"0.48469725",
"0.48390236",
"0.48284045",
"0.4827669",
"0.48258793",
"0.4823862"
]
| 0.7222951 | 0 |
A running ViewlySeedSale contract. | def running_sale(chain: BaseChain, token: Contract, sale) -> Contract:
sale.transact().startSale(DURATION, BLOCK_OFFSET)
chain.wait.for_block(sale.call().startBlock())
return sale | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sale(chain: BaseChain, token: Contract, beneficiary) -> Contract:\n args = [token.address, beneficiary]\n seed_sale = deploy_contract(chain, 'ViewlySeedSale', args=args)\n token.transact().setOwner(seed_sale.address)\n return seed_sale",
"def sale_call(data):\n print('-' * 80)\n print(\"\")\n print(\"This is the Sales review.\")\n items_tally = get_new_list(data, 4)\n total_sales = count_total_sales(items_tally)\n sales_vals = get_new_list(data, 5)\n values_tally = [int(num) for num in sales_vals]\n count_sales_value(values_tally, total_sales)\n exit_call = continue_exit(data)\n if exit_call:\n return True\n else:\n return False",
"def test_sale_service(self):\n sale_order_vals = {\n 'partner_id': self.partner_usd.id,\n 'partner_invoice_id': self.partner_usd.id,\n 'partner_shipping_id': self.partner_usd.id,\n 'order_line': [(0, 0, {\n 'name': self.product_delivery_timesheet2.name,\n 'product_id': self.product_delivery_timesheet2.id,\n 'product_uom_qty': 50,\n 'product_uom': self.product_delivery_timesheet2.uom_id.id,\n 'price_unit': self.product_delivery_timesheet2.list_price\n }),\n ],\n 'pricelist_id': self.pricelist_usd.id,\n }\n sale_order = self.env['sale.order'].create(sale_order_vals)\n sale_order.order_line._compute_product_updatable()\n self.assertTrue(sale_order.order_line[0].product_updatable)\n sale_order.action_confirm()\n sale_order.order_line._compute_product_updatable()\n self.assertFalse(sale_order.order_line[0].product_updatable)\n self.assertEqual(sale_order.invoice_status, 'no', 'Sale Service: there should be nothing to invoice after validation')\n\n # check task creation\n project = self.project_global\n task = project.task_ids.filtered(lambda t: t.name == '%s:%s' % (sale_order.name, self.product_delivery_timesheet2.name))\n self.assertTrue(task, 'Sale Service: task is not created')\n self.assertEqual(task.partner_id, sale_order.partner_id, 'Sale Service: customer should be the same on task and on SO')\n # register timesheet on task\n self.env['account.analytic.line'].create({\n 'name': 'Test Line',\n 'project_id': project.id,\n 'task_id': task.id,\n 'unit_amount': 50,\n 'employee_id': self.employee_manager.id,\n })\n self.assertEqual(sale_order.invoice_status, 'to invoice', 'Sale Service: there should be sale_ordermething to invoice after registering timesheets')\n sale_order.action_invoice_create()\n line = sale_order.order_line\n self.assertTrue(line.product_uom_qty == line.qty_delivered == line.qty_invoiced, 'Sale Service: line should be invoiced completely')\n self.assertEqual(sale_order.invoice_status, 'invoiced', 'Sale Service: SO should be invoiced')\n self.assertEqual(sale_order.tasks_count, 1, \"A task should have been created on SO confirmation.\")\n\n # Add a line on the confirmed SO, and it should generate a new task directly\n product_service_task = self.env['product.product'].create({\n 'name': \"Delivered Service\",\n 'standard_price': 30,\n 'list_price': 90,\n 'type': 'service',\n 'invoice_policy': 'delivery',\n 'uom_id': self.env.ref('product.product_uom_hour').id,\n 'uom_po_id': self.env.ref('product.product_uom_hour').id,\n 'default_code': 'SERV-DELI',\n 'service_type': 'timesheet',\n 'service_tracking': 'task_global_project',\n 'project_id': project.id\n })\n\n self.env['sale.order.line'].create({\n 'name': product_service_task.name,\n 'product_id': product_service_task.id,\n 'product_uom_qty': 10,\n 'product_uom': product_service_task.uom_id.id,\n 'price_unit': product_service_task.list_price,\n 'order_id': sale_order.id,\n })\n\n self.assertEqual(sale_order.tasks_count, 2, \"Adding a new service line on a confirmer SO should create a new task.\")",
"def run_seeds(self, nbrun):\n self._check_status(STATUS_IDLE)\n self._set_status(STATUS_RUNNING_SEEDS)\n self._notify_listeners_start_operation(listener.OPERATION_RUN_SEEDS)\n rsol = self.agent.run_seeds(nbrun)\n self._set_status(STATUS_IDLE)\n self._notify_listeners_end_operation()\n return rsol",
"def call_transfer_fund(self):\n ## 1) Create expense line for current student\n ## 2) Create Deposite lines for oney transfer student\n\n ## 1\n student_pool = self.env['op.student']\n partner_obj = self.env['res.partner']\n employee_pool = self.env['hr.employee']\n\n if not self.pin_varification:\n raise except_orm(_('Warning!'),\n _(\"Enter Valid PIN to proceed!\"))\n\n\n student_id = student_pool.search([('user_id', '=', self._uid)])\n\n ## Validate Enter PIN\n if student_id:\n self.validate_current_user_pin(student_id)\n\n expense_vals = {\n 'name': student_id.id,\n 'amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s\" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n 'create_invoice': False,\n # 'student_id': student_id.id,\n }\n\n student_expenses_id = self.env['student.expenses'].sudo().create(expense_vals)\n self.total_expense_balance = student_id.stud_balance_amount\n\n ## Get employee form account id\n employee_id = employee_pool.sudo().search([('ean13', '=', self.account_no)])\n\n ## Search EMployee By Employee ID\n search_by_id_employee_id = employee_pool.sudo().search([('identification_id', '=', self.account_no)])\n\n ## Search by student matrix ID\n search_by_id_student_id = student_pool.sudo().search([('gr_no', '=', self.account_no)])\n\n if not self.account_no:\n ## Logic for search by User Name\n employee_id = self.pass_employee_id.sudo()\n student_id = self.pass_student_id.sudo()\n else:\n ## Get partner form account id\n student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if student_id:\n deposite_vals = {\n 'name': student_id.id,\n # 'amount': self.amount_to_transfer,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n if not self.account_no:\n trans_student_id = student_id.sudo()\n else:\n trans_student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if trans_student_id:\n self.total_deposite_balance = trans_student_id.stud_balance_amount\n elif employee_id:\n deposite_vals = {\n 'name': employee_id.id,\n 'employee_id': employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = employee_id.available_balance\n\n elif search_by_id_employee_id:\n deposite_vals = {\n 'name': search_by_id_employee_id.id,\n 'employee_id': search_by_id_employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_employee_id.available_balance\n\n elif search_by_id_student_id:\n deposite_vals = {\n 'name': search_by_id_student_id.id,\n 'employee_id': search_by_id_student_id.gr_no,\n 'paid_amount': self.amount_transfer,\n 'date': 
datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_student_id.stud_balance_amount\n\n # return True\n compose_form = self.env.ref('deposite_management.transfer_confirmation_popup_view', False)\n\n try:\n template_id = self.env.ref('deposite_management.email_template_student_fund_transfer', False)\n except ValueError:\n template_id = False\n values = self.env['email.template'].generate_email(template_id.id, self.id)\n\n ## Append Student email id to send mail\n if values and 'email_to' in values:\n values['email_to'] = student_id.sudo().email\n mail_id = self.env['mail.mail'].sudo().create(values)\n if mail_id:\n mail_send_id = mail_id.send()\n\n try:\n template_id_new = self.env.ref('deposite_management.email_template_student_fund_transfer_self_notification', False)\n except ValueError:\n template_id_new = False\n values_new = self.env['email.template'].generate_email(template_id_new.id, self.id)\n ## Append email id to send mail\n if values_new and 'email_to' in values_new:\n if student_id and trans_student_id:\n values_new['email_to'] = trans_student_id.email\n elif employee_id:\n values_new['email_to'] = employee_id.sudo().work_email\n mail_id_new = self.env['mail.mail'].sudo().create(values_new)\n if mail_id_new:\n mail_send_id = mail_id_new.send()\n ## return wizard after click on Fund Transfer Button\n return {\n 'name': _('Fund Transfer Done'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'fund.confirmation.msg',\n 'view_id': compose_form.id,\n 'target': 'new',\n }",
"def test_17_transaction_create_sell_cash(self):\n portfolio = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n sell_cash_eur = Transaction.sell_cash(\n portfolio=portfolio,\n asset=\"EUR\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=100000,\n unit_price=1.17,\n user=user\n )\n\n self.assertTrue(isinstance(sell_cash_eur, Transaction),\n msg=\"Transaction is NOT returning a valid object while selling EUR in cash\")\n print(\"Transaction sell_cash method is returning a valid EUR transaction: {}\".format(\n sell_cash_eur))\n\n \"\"\"Is transaction avoiding short sell cash objects?\"\"\"\n short_sell_cash_eur = Transaction.sell_cash(\n portfolio=portfolio,\n asset=\"EUR\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=500000,\n unit_price=1.10,\n user=user\n )\n\n self.assertFalse(isinstance(short_sell_cash_eur, Transaction),\n msg=\"Transaction is NOT avoiding short selling EUR in cash\")\n print(\"Transaction sell_cash method is avoiding a short sell EUR transaction: {}\".format(\n short_sell_cash_eur))",
"async def _sell_live_task(self, trade: Dict[str, Any], label: str, sell_type: str,\n detection_name: str, trigger_data: Dict[str, Any], remit: bool):\n\n order_id = await self._submit_trade_sell(trade)\n\n if order_id is not None:\n await self._update_trade_sell(trade, order_id)\n await self._register_trade_sell(trade, label, sell_type, detection_name, trigger_data)\n\n if remit:\n base, _, trade_base_pair = common.get_pair_elements(trade['pair'])\n reserved = await self._get_open_trades_value(trade_base_pair)\n filled_quantity = trade['quantity'] - trade['remaining']\n adjusted_proceeds = filled_quantity * (trade['close_value'] - trade['open_value'])\n await self.balancer.handle_remit_request(base, trade['base_value'], reserved, adjusted_proceeds)\n\n return order_id",
"def perform_strategy(self):\r\n number = randint(0, 111)\r\n\r\n myEnvelope = self._envelopeList[number]\r\n\r\n print(myEnvelope)",
"def token(chain: BaseChain) -> Contract:\n return deploy_contract(chain, 'DSToken', args=['VIEW'])",
"def do_turn(self, price: int) -> SalesmanAction:",
"def test_14_transaction_create_sell_bonds(self):\n portfolio = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n sell_bond_alitalia = Transaction.sell_bond(\n portfolio=portfolio,\n asset=\"ALITALIA\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=8,\n unit_price=118000.04,\n user=user\n )\n\n self.assertTrue(isinstance(sell_bond_alitalia, Transaction),\n msg=\"Transaction is NOT returning a valid object while selling an ALITALIA bond\")\n print(\"Transaction sell_bond method is returning a valid ALITALIA transaction: {}\".format(\n sell_bond_alitalia))\n\n \"\"\"Is transaction avoiding short sell bond objects?\"\"\"\n short_sell_bond_alitalia = Transaction.sell_bond(\n portfolio=portfolio,\n asset=\"ALITALIA\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=1,\n unit_price=121000,\n user=user\n )\n\n self.assertFalse(isinstance(short_sell_bond_alitalia, Transaction),\n msg=\"Transaction is NOT avoiding short selling an ALITALIA stock\")\n print(\"Transaction sell_stock method is avoiding a short sell ALITALIA transaction: {}\".format(\n short_sell_bond_alitalia))",
"def contract(docid):\n return render_template('doc.html', docid=docid)",
"def test_11_transaction_create_sell_stock(self):\n portfolio = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n sell_stock_aapl = Transaction.sell_stock(\n portfolio=portfolio,\n asset=\"AAPL\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=64,\n unit_price=163.04,\n user=user\n )\n\n sell_stock_msft = Transaction.sell_stock(\n portfolio=portfolio,\n asset=\"MSFT\",\n t_currency=TRANSACTION_CURRENCY_EUR,\n amount=32,\n unit_price=76.20,\n user=user\n )\n\n self.assertTrue(isinstance(sell_stock_aapl, Transaction),\n msg=\"Transaction is NOT returning a valid object while selling an AAPL stock\")\n print(\"Transaction sell_stock method is returning a valid AAPL transaction: {}\".format(\n sell_stock_aapl))\n\n self.assertTrue(isinstance(sell_stock_msft, Transaction),\n msg=\"Transaction is NOT returning a valid object while selling an MSFT stock\")\n print(\"Transaction sell_stock method is returning a valid MSFT transaction: {}\".format(\n sell_stock_msft))\n\n \"\"\"Is transaction avoiding short sell stock objects?\"\"\"\n short_sell_stock_msft = Transaction.sell_stock(\n portfolio=portfolio,\n asset=\"MSFT\",\n t_currency=TRANSACTION_CURRENCY_EUR,\n amount=33,\n unit_price=78.20,\n user=user\n )\n\n self.assertFalse(isinstance(short_sell_stock_msft, Transaction),\n msg=\"Transaction is NOT avoiding short selling an MSFT stock\")\n print(\"Transaction sell_stock method is avoiding a short sell MSFT transaction: {}\".format(\n short_sell_stock_msft))",
"def do_sell():\n order_size = calculate_sell_order_size()\n if order_size is None:\n return None\n i = 1\n while i <= CONF.trade_trials:\n sell_price = calculate_sell_price(get_current_price())\n order = create_sell_order(sell_price, order_size)\n if order is None:\n LOG.error(\"Could not create sell order over %s\", order_size)\n return None\n write_action('-SELL')\n order_status = poll_order_status(order.id, 10)\n if order_status == 'open':\n cancel_order(order)\n i += 1\n daily_report()\n else:\n return order\n write_action('-SELL')\n return create_market_sell_order(order_size)",
"async def generate_new_decentralised_id(\n self, amount: uint64, tx_config: TXConfig, fee: uint64 = uint64(0)\n ) -> Optional[SpendBundle]:\n\n coins = await self.standard_wallet.select_coins(uint64(amount + fee), tx_config.coin_selection_config)\n if coins is None:\n return None\n\n origin = coins.copy().pop()\n genesis_launcher_puz = SINGLETON_LAUNCHER_PUZZLE\n launcher_coin = Coin(origin.name(), genesis_launcher_puz.get_tree_hash(), amount)\n\n did_inner: Program = await self.get_new_did_innerpuz(launcher_coin.name())\n did_inner_hash = did_inner.get_tree_hash()\n did_full_puz = create_singleton_puzzle(did_inner, launcher_coin.name())\n did_puzzle_hash = did_full_puz.get_tree_hash()\n\n announcement_set: Set[Announcement] = set()\n announcement_message = Program.to([did_puzzle_hash, amount, bytes(0x80)]).get_tree_hash()\n announcement_set.add(Announcement(launcher_coin.name(), announcement_message))\n\n tx_record: Optional[TransactionRecord] = await self.standard_wallet.generate_signed_transaction(\n amount,\n genesis_launcher_puz.get_tree_hash(),\n tx_config,\n fee,\n coins,\n None,\n False,\n announcement_set,\n origin_id=origin.name(),\n )\n\n genesis_launcher_solution = Program.to([did_puzzle_hash, amount, bytes(0x80)])\n\n launcher_cs = CoinSpend(launcher_coin, genesis_launcher_puz, genesis_launcher_solution)\n launcher_sb = SpendBundle([launcher_cs], AugSchemeMPL.aggregate([]))\n eve_coin = Coin(launcher_coin.name(), did_puzzle_hash, amount)\n future_parent = LineageProof(\n eve_coin.parent_coin_info,\n did_inner_hash,\n uint64(eve_coin.amount),\n )\n eve_parent = LineageProof(\n launcher_coin.parent_coin_info,\n launcher_coin.puzzle_hash,\n uint64(launcher_coin.amount),\n )\n await self.add_parent(eve_coin.parent_coin_info, eve_parent)\n await self.add_parent(eve_coin.name(), future_parent)\n\n if tx_record is None or tx_record.spend_bundle is None:\n return None\n\n # Only want to save this information if the transaction is valid\n did_info: DIDInfo = DIDInfo(\n launcher_coin,\n self.did_info.backup_ids,\n self.did_info.num_of_backup_ids_needed,\n self.did_info.parent_info,\n did_inner,\n None,\n None,\n None,\n False,\n self.did_info.metadata,\n )\n await self.save_info(did_info)\n eve_spend = await self.generate_eve_spend(eve_coin, did_full_puz, did_inner)\n full_spend = SpendBundle.aggregate([tx_record.spend_bundle, eve_spend, launcher_sb])\n assert self.did_info.origin_coin is not None\n assert self.did_info.current_inner is not None\n\n did_record = TransactionRecord(\n confirmed_at_height=uint32(0),\n created_at_time=uint64(int(time.time())),\n amount=uint64(amount),\n to_puzzle_hash=await self.standard_wallet.get_puzzle_hash(False),\n fee_amount=fee,\n confirmed=False,\n sent=uint32(10),\n spend_bundle=full_spend,\n additions=full_spend.additions(),\n removals=full_spend.removals(),\n wallet_id=self.id(),\n sent_to=[],\n trade_id=None,\n type=uint32(TransactionType.INCOMING_TX.value),\n name=bytes32(token_bytes()),\n memos=[],\n )\n regular_record = dataclasses.replace(tx_record, spend_bundle=None)\n await self.wallet_state_manager.add_pending_transaction(regular_record)\n await self.wallet_state_manager.add_pending_transaction(did_record)\n return full_spend",
"def _validateSale(self, player: Player, company: PublicCompany, amount: int, kwargs: MutableGameState):\n my_purchases = kwargs.purchases[kwargs.stock_round_count].get(player, [])\n\n my_stock = player.hasStock(company)\n potential_owners = company.potentialPresidents()\n\n validations = [\n err(company not in my_purchases,\n \"You can't sell something you already bought: {} {}\",\n company.id, company.short_name),\n\n err(\n my_stock >= amount,\n \"You must have as much stock than you are trying to sell {}\",\n amount\n ),\n\n err(\n company.availableStock(StockPurchaseSource.BANK) + amount <= 60,\n \"You can't sell that much ({}); the bank can only have 50 shares max.\",\n amount\n ),\n\n err(\n len(company.potentialPresidents() - {player}) > 0 or my_stock - amount >= 20,\n \"There are no other potential presidents, so you can't sell your shares. {} / {} (original stock: {})\",\n \",\".join([p.id for p in company.potentialPresidents()]),\n company.name,\n str(company.owners.get(player))\n\n ),\n\n err(amount % STOCK_CERTIFICATE == 0,\n \"You can only sell in units of 10 stocks ({})\".format(amount),\n ),\n\n err(kwargs.stock_round_count > 1,\n \"You can only sell after the first stock round.\")\n ]\n\n return self.validate(validations)",
"def sell():\n return apology(\"TODO\")",
"def test_inTransSalePC(self):\n # Start a transasction\n pos.click_speed_key(\"Generic Item\")\n \n # Void the item to an empty transaction\n # NOTE: Should uncomment this when related defect is fixed (likely in MERLIN-1335)\n #pos.click(\"Void item\")\n \n # Repeat earlier test\n self.test_basicSalePC()",
"def test_call(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target_dummy)\n config = runner._scenario.configspace.get_default_configuration()\n\n SEED = 2345\n status, cost, _, _ = runner.run(config=config, instance=None, seed=SEED, budget=None)\n\n assert cost == SEED\n assert status == StatusType.SUCCESS",
"async def _sell_sim(self, trade: Dict[str, Any], label: str, sell_type: str=None,\n detection_name: str=None, trigger_data: dict=None, remit: bool=True) -> asyncio.Future:\n\n pair = trade['pair']\n adjusted_value = self.market.adjusted_close_values[pair][-1]\n adjusted_proceeds = adjusted_value * trade['quantity']\n adjusted_fees = adjusted_proceeds * config['trade_fee_percent']\n current_time = self.market.close_times[pair][-1]\n\n trade['close_time'] = current_time\n trade['close_value'] = adjusted_value\n trade['fees'] += adjusted_fees\n\n await self._simulate_sell_balances(trade, remit, adjusted_proceeds, adjusted_fees)\n await self._register_trade_sell(trade, label, sell_type, detection_name, trigger_data)\n\n future = asyncio.Future()\n future.set_result(uuid.uuid4().hex)\n return future",
"def test_monitor_contract_single_event_once(self):\n deposit_value = to_wei(1, 'ether')\n self._create_deposit_event()\n listener = EventListener(rpc_provider=self.provider)\n\n tx_hash = self.bank_contract.functions.deposit(). \\\n transact({'from': self.web3.eth.accounts[0], 'value': deposit_value})\n\n listener.execute()\n\n self.assertEqual(len(bank_deposit_events), 1, \"Deposit event listener fired\")\n self.assertEqual(bank_deposit_events[0].args.amount, deposit_value, \"Argument fetched correctly\")",
"def test_participate_with_signed_address(chain, crowdsale, customer, customer_id, token, private_key):\n\n address_bytes = get_address_as_bytes(customer)\n sign_data = sign(address_bytes, private_key)\n\n time_travel(chain, crowdsale.call().startsAt() + 1)\n wei_value = to_wei(1, \"ether\")\n assert crowdsale.call().getState() == CrowdsaleState.Funding\n crowdsale.transact({\"from\": customer, \"value\": wei_value}).buyWithSignedAddress(customer_id, sign_data[\"v\"], sign_data[\"r_bytes\"], sign_data[\"s_bytes\"])\n\n # We got credited\n assert token.call().balanceOf(customer) > 0\n\n # We have tracked the investor id\n events = crowdsale.pastEvents(\"Invested\").get()\n assert len(events) == 1\n e = events[0]\n assert e[\"args\"][\"investor\"] == customer\n assert e[\"args\"][\"weiAmount\"] == wei_value\n assert e[\"args\"][\"customerId\"] == customer_id",
"def worker(args):\n\n # Step 1. Create the NDSE view request object\n # Set the url where you want the recipient to go once they are done\n # with the NDSE. It is usually the case that the\n # user will never \"finish\" with the NDSE.\n # Assume that control will not be passed back to your app.\n view_request = ConsoleViewRequest(return_url=args[\"ds_return_url\"])\n if args[\"starting_view\"] == \"envelope\" and args[\"envelope_id\"]:\n view_request.envelope_id = args[\"envelope_id\"]\n\n # Step 2. Get the console view url\n # Exceptions will be caught by the calling function\n api_client = create_api_client(base_path=args[\"base_path\"], access_token=args[\"access_token\"])\n\n envelope_api = EnvelopesApi(api_client)\n results = envelope_api.create_console_view(account_id=args[\"account_id\"], console_view_request=view_request)\n url = results.url\n return {\"redirect_url\": url}",
"def test_deposit_amount_view(self):\n client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_deposit')\n amount_1 = random.randint(10, 50000)\n amount_2 = random.randint(10, 50000)\n\n request_1 = client.post(url, {'amount': amount_1}, format='json')\n self.account.refresh_from_db()\n\n request_2 = client.post(url, {'amount': amount_2}, format='json')\n self.account.refresh_from_db()\n\n self.assertEqual(amount_1 + amount_2, self.account.current_balance)",
"def test_create_contract(token, carrier):\n assert token.call().dividendsCarrier() == carrier.address",
"def run_seeds(self, nbrun):\n self._raise_not_supported()",
"def sell(self):\n self.status = \"sold\"\n return self",
"def create_sepa_xml(cls, qs):\n\n batch_id = timezone.datetime.strftime(timezone.now(), '%Y%m%d%H%I%S')\n\n sepa = SepaDocument(sepa_type='CT')\n\n sepa.set_initiating_party(\n name=settings.BANK_ACCOUNT_DONATIONS['name']\n )\n debtor = SepaAccount(\n name=settings.BANK_ACCOUNT_DONATIONS['name'],\n iban=settings.BANK_ACCOUNT_DONATIONS['iban'],\n bic=settings.BANK_ACCOUNT_DONATIONS['bic']\n )\n\n sepa.set_debtor(debtor)\n sepa.set_info(message_identification=batch_id, payment_info_id=batch_id)\n sepa.set_initiating_party(name=settings.BANK_ACCOUNT_DONATIONS['name'])\n\n now = timezone.now()\n\n for payout in qs.all():\n payout.status = StatusDefinition.IN_PROGRESS\n payout.submitted = now\n payout.save()\n creditor = SepaAccount(\n name=payout.receiver_account_name,\n iban=payout.receiver_account_iban,\n bic=payout.receiver_account_bic\n )\n\n sepa.add_credit_transfer(\n creditor=creditor,\n amount=payout.amount_payable,\n creditor_payment_id=payout.invoice_reference\n )\n\n return sepa.as_xml()",
"async def sell(self):\n if len(self.factory.foobar) > 0:\n how_many = min(len(self.factory.foobar), random.randint(1, 5))\n foobars = [self.factory.get_foobar() for x in range(how_many)]\n await self.wait(10)\n for foobar in foobars:\n self.say(f\"Selling {foobar} for 1€\")\n self.factory.money += 1\n else:\n self.say(\"Nothing to sell\")",
"def test_access_sales_person(self):\n # Salesperson can see only their own sales order\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_salesman_2']).read()\n # Now assign the SO to themselves\n self.order.write({'user_id': self.company_data['default_user_salesman_2'].id})\n self.order.with_user(self.company_data['default_user_salesman_2']).read()\n # Salesperson can change a Sales Team of SO\n self.order.with_user(self.company_data['default_user_salesman_2']).write({'team_id': self.company_data['default_sale_team'].id})\n # Salesperson can't create the SO of other salesperson\n with self.assertRaises(AccessError):\n self.env['sale.order'].with_user(self.company_data['default_user_salesman_2']).create({\n 'partner_id': self.partner_a.id,\n 'user_id': self.company_data['default_user_salesman'].id\n })\n # Salesperson can't delete the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_salesman_2']).unlink()\n # Salesperson can confirm the SO\n self.order.with_user(self.company_data['default_user_salesman_2']).action_confirm()"
]
| [
"0.76732326",
"0.5601624",
"0.5313552",
"0.5237656",
"0.5188305",
"0.51351535",
"0.49769667",
"0.4958542",
"0.49149904",
"0.4875205",
"0.4873706",
"0.48463243",
"0.48374844",
"0.48354483",
"0.48138532",
"0.48032266",
"0.47857577",
"0.4776219",
"0.47309092",
"0.46917468",
"0.46829563",
"0.46750927",
"0.46698546",
"0.46438396",
"0.46396166",
"0.4628092",
"0.46277723",
"0.46262702",
"0.4625092",
"0.46229476"
]
| 0.6293835 | 1 |
Creates embeddings for every item in the loader | def get_embeddings(model, loader, device=torch.device('cpu')):
embeddings = []
labels = []
for item in loader:
data, label = item
data = data.view(-1, 1, data.shape[-1])
data = data.to(device)
label = label.to(device)
output = model(data).squeeze(1)
embedding = output.cpu().data.numpy()
label = label.cpu().data.numpy()
embeddings.append(embedding)
labels.append(label)
embeddings = np.array(embeddings)
labels = np.array(labels)
return embeddings, labels | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.words\n vectors = [model.get_word_vector(word) for word in words]\n\n return [(w, *v) for w, v in zip(words, vectors)]",
"def set_embeddings(self):",
"def init_emb(self):\n # Initialize users and items' embeddings\n nn.init.xavier_uniform_(self.user_embedding.weight)\n nn.init.xavier_uniform_(self.item_embedding.weight)",
"def load_embeddings(config, name, vocab, training_generator, validation_generator):\n\n # Pickle embeddings should be AGNOSTIC to the name. This is because each pickled embedding is specific to the dataset and transformer.\n # Applies down the road when/if we attempt active learning\n data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension\n train_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')\n valid_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')\n \n \n if os.path.exists(train_embed_pkl_f):\n with open( train_embed_pkl_f, 'rb') as cache:\n train_embeddings = pickle.load(cache)\n\n with open(valid_embed_pkl_f, 'rb') as cache:\n valid_embeddings = pickle.load(cache)\n else:\n # get embeddings from scratch\n tokenizer = AutoTokenizer.from_pretrained(vocab)\n embedding_model = AbstractBert(vocab) \n\n if torch.cuda.device_count() > 1:\n print(\"GPUs Available: \", torch.cuda.device_count())\n embedding_model = torch.nn.DataParallel(embedding_model, device_ids=[0, 1, 2])\n \n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\n embedding_model.eval().to(device)\n\n logger.info(' Getting BERT/ROBERTA embeddings...')\n\n train_embeddings = _get_bert_embeddings(training_generator, embedding_model, config[\"metadata\"])\n valid_embeddings = _get_bert_embeddings(validation_generator, embedding_model, config[\"metadata\"])\n\n # save embeddings\n pickle.dump(train_embeddings, open(train_embed_pkl_f, 'wb'))\n pickle.dump(valid_embeddings, open(valid_embed_pkl_f, 'wb'))\n\n logger.info(' Saved full BERT/ROBERTA embeddings.')\n\n embedding_shape = train_embeddings['embeddings'][1].shape[0]\n\n return embedding_shape, train_embeddings, valid_embeddings",
"def init(self, preload_embeddings):\n\t\tself.__find_metadata()\n\t\tself.__parse_embedding_metadata()\n\t\tself.__parse_model_metadata()\n\t\t# should we load all of the word embeddings into memory now?\n\t\tif preload_embeddings:\n\t\t\tlog.info(\"Preloading word embeddings ...\")\n\t\t\tfor embed_id in self.embedding_meta:\n\t\t\t\tself.get_embedding(embed_id)\t\n\t\t\tlog.info(\"Preloaded %d word embeddings\" % len(self.embedding_cache))",
"def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()",
"def embeddings_layers_init(self):\n\n user_embeddings = tf.keras.layers.Embedding(\n self.n_users, self.user_dim, input_length=1)\n\n item_embeddings = tf.keras.layers.Embedding(\n self.n_items, self.item_dim, input_length=1)\n\n return user_embeddings, item_embeddings",
"def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings",
"def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)",
"def produce_outputs(self):\n # if self.loaded_aggregated:\n # debug(\"Skippping {} mapping due to preloading\".format(self.base_name))\n # return\n # need to calc term numeric index for aggregation\n\n\n # if self.loaded_preprocessed:\n # debug(\"Skippping {} mapping due to preloading\".format(self.base_name))\n # return\n\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range)\n\n self.embeddings = np.ndarray((0, len(self.term_list)), dtype=np.int32)\n for idx in self.indices.get_train_test():\n texts = Text.get_strings(self.text.data.get_slice(idx))\n vecs = bagger.map_collection(texts, fit=False, transform=True)\n self.embeddings = np.append(self.embeddings, vecs, axis=0)\n del texts\n\n # texts = Text.get_strings(self.text.data.get_slice(test_idx))\n # vec_test = bagger.map_collection(texts, fit=do_fit)\n # del texts\n\n # self.embeddings = np.vstack((vec_train, vec_test))\n\n # self.embeddings = np.append(vec_train, vec_test)\n # self.vector_indices = (np.arange(len(train)), np.arange(len(test)))\n\n # set misc required variables\n self.set_constant_elements_per_instance()",
"def make_embeddings(self):\n\t\tprint(\"Presetting embedding weights\")\n\t\t\t\n\t\tnp.random.seed(0)\n\t\tweights = np.random.uniform(low = -0.05, high = 0.05, size = (self.FREQCAP, self.EMB_SIZE))\n\t\t\n\t\tcounter = 0\n\n\t\twords = []\n\t\tweights_tmp = []\n\n\t\twith open(self.embeddingpath) as handle:\n\t\t\tfor i, line in enumerate(handle):\n\t\t\t\ttmp = line.strip()\n\t\t\t\tif len(tmp) > 0:\n\t\t\t\t\tsplit = tmp.split(\" \")\n\t\t\t\t\tif split[0] in self.worddict and len(split[1:]) == 300:\n\t\t\t\t\t\twords.append(split[0])\n\t\t\t\t\t\tweights_tmp.append([float(a) for a in split[1:]])\n\t\t\n\t\tweights_tmp = np.array(weights_tmp)\n\n\t\tfor word, column in zip(words, weights_tmp):\n\t\t\tif self.worddict[word] < self.FREQCAP:\n\t\t\t\tcounter += 1\n\t\t\t\tweights[self.worddict[word],:] = column\n\t\t\n\t\tprint(\"Set\", counter, \"of\", weights.shape[0], \"columns\")\n\t\t\n\t\tif self.EMB_SIZE < weights.shape[-1]:\n\t\t\tprint(\"Reducing dimensionality to\", self.EMB_SIZE)\n\t\t\tpca = PCA(self.EMB_SIZE)\n\t\t\tweights = pca.fit_transform(weights)\n\t\t\n\t\tself.embeddings = [weights]",
"def load_pretrained_embeddings(self, embeddings):\r\n self.embedding.weight = nn.Parameter(embeddings)",
"def load_embeddings():\n return embedding_utils.PretrainedWordEmbeddings(\n lowercase=FLAGS.lowercase,\n embeddings_path=FLAGS.fasttext_embeddings,\n max_vocab_size=FLAGS.max_vocab_size,\n skip_header=True)",
"def embed(documents, ctx_encoder, ctx_tokenizer, device):\n input_ids = ctx_tokenizer(\n documents[\"title\"],\n documents[\"text\"],\n truncation=True,\n padding=\"longest\",\n return_tensors=\"pt\",\n )[\"input_ids\"]\n embeddings = ctx_encoder(\n input_ids.to(device=device), return_dict=True\n ).pooler_output\n return {\"embeddings\": embeddings.detach().cpu().numpy()}",
"def load_pretrained_embeddings(self, embeddings):\n self.embedding.weight = nn.Parameter(embeddings)",
"def make_embedding(self, user_ids, item_ids):\n embed = []\n for uid, bid in zip(user_ids, item_ids):\n user = self.user_concats[uid]\n item = self.item_concats[bid]\n embed_concat = np.expand_dims(np.concatenate([user,item], 0), 0)\n embed.append(embed_concat)\n embed = np.concatenate(embed, 0)\n return embed",
"def setup_embeddings(self):\n with vs.variable_scope(\"embeddings\"):\n vec_embeddings = tf.get_variable(\"embeddings\", initializer=self.pretrained_embeddings, trainable=False)\n context_batch_embeddings = tf.nn.embedding_lookup(vec_embeddings, self.context_placeholder)\n question_batch_embeddings = tf.nn.embedding_lookup(vec_embeddings, self.question_placeholder)\n context_embeddings = tf.reshape(context_batch_embeddings,\n (-1, self.max_context_len, self.vocab_dim))\n question_embeddings = tf.reshape(question_batch_embeddings,\n (-1, self.max_question_len, self.vocab_dim))\n return context_embeddings, question_embeddings",
"def add_embed_itmes(data):\n for k, v in data.items() :\n embed.add_embed_field(name=k, value=v)",
"def make_embedding(path, words, indices):\n #root = '/'.join(path.split('/')[0:-1])\n #all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n #for path in all_paths:\n vec_path = 'data/'+path.split('/')[-1]+'_'+mode\n print(vec_path)\n if os.path.exists(vec_path+'.npy'):\n np_vecs = np.load(vec_path+'.npy')\n else:\n words_len = len(words)\n vecs = []\n if mode == 'word':\n f = load_model('wiki.en.bin')\n for i, w in enumerate(words):\n if mode == 'word':\n vec = f.get_word_vector(w)\n else:\n vec = eye[indices[w]]\n vecs.append(vec) \n if i % 10000 == 0:\n print(\"{} / {}\".format(i, words_len))\n np_vecs = np.asarray(vecs, dtype=np.int8)\n np.save(vec_path, np_vecs)\n return np_vecs",
"def _get_embedding(self, data):\n embedding_list = [super()._get_embedding(data)]\n context = data['context']\n for i in range(context.shape[1]):\n embedding_list.append(getattr(self, f'context{i}')(context[:, i:i+1]))\n return torch.cat(embedding_list, dim=1)",
"def generate_sentence_embeddings():\n generate_embeddings_sentence(\"Data/en-train.json\", \"Data_Sent_Embds/en_sent.pkl\")\n generate_embeddings_sentence(\"Data/es-train.json\", \"Data_Sent_Embds/es_sent.pkl\")\n generate_embeddings_sentence(\"Data/pr-train.json\", \"Data_Sent_Embds/pr_sent.pkl\")",
"def add_embeddings(self):\n\n with tf.device('/cpu:0'):\n with tf.variable_scope('Embedding_Layer'):\n embeddings = tf.Variable(self.initial_embeddings,name = 'Embeddings')\n self.input_embeddings = tf.nn.embedding_lookup(embeddings, self.inputs_placeholder) #(N,S,D)\n self.question_embeddings = tf.nn.embedding_lookup(embeddings, self.questions_placeholder) #(N,S,D)",
"def create_embedding(skills):\n corpus = list(skills[\"description\"].values)\n embedder = SentenceTransformer(config[\"sentence_transformer\"][\"model\"])\n embedding = embedder.encode(corpus, show_progress_bar=True)\n return embedding",
"def generate_embeddings_sentence_test_data(data, path_out):\n flair.device = torch.device('cpu')\n dicts = []\n # init multilingual BERT\n bert_embedding = TransformerDocumentEmbeddings('bert-base-multilingual-cased')\n counter = 0\n for entry in data:\n print(\"Counter: \", counter)\n counter += 1\n text = entry[\"sentence\"]\n id = entry[\"id\"]\n sent = Sentence(text)\n bert_embedding.embed(sent)\n vec = sent.get_embedding().detach().numpy()\n dicts.append((id,vec))\n gc.collect()\n result = dicts\n file = open(path_out, \"wb\")\n pickle.dump(result, file)\n file.close()\n return result",
"def _embeddings(self, xs):\n n_feats, batch_size, seq_len = xs.size()\n\n assert n_feats == self.n_feats\n\n res = [emb(x) for emb, x in zip(self.embeddings, xs)]\n x = torch.cat(res, 2)\n\n return x",
"def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. 
compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer",
"def __glove_embed__(sequence, model):\n embedded = []\n for word in sequence:\n embedded.append(model[word])\n return embedded",
"def load_embeddings_models():\n\n\t# ---LOADING WORD2VEC MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'word2vec', 'NILC', 'nilc_cbow_s300_300k.txt')\n\t# model_load_path = os.path.join('models', 'word2vec', 'NILC', 'nilc_skip_s300.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the word2vec model\")\n\tword2vec_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# word2vec_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING FASTTEXT MODEL---\n\tmodel_path = os.path.join(ROOT_PATH, 'models', 'fastText', 'cc.pt.300_300k.vec')\n\tstart_time = time.time()\n\tprint(\"Started loading the fasttext model\")\n\tfasttext_model = KeyedVectors.load_word2vec_format(model_path)\n\t# fasttext_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\t\n\n\t# ---LOADING PT-LKB MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'ontoPT', 'PT-LKB_embeddings_64', 'ptlkb_64_30_200_p_str.emb')\n\t# model_load_path = os.path.join('models', 'ontoPT', 'PT-LKB_embeddings_128', 'ptlkb_128_80_10_p_str.emb')\n\tstart_time = time.time()\n\tprint(\"Started loading the PT-LKB-64 model\")\n\tptlkb64_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# ptlkb64_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING GLOVE-300 MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'glove', 'glove_s300_300k.txt')\n\t# model_load_path = os.path.join('models', 'glove', 'glove_s100.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the GLOVE 300 dimensions model\")\n\tglove300_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# glove300_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING NUMBERBATCH MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'numberbatch', 'numberbatch-17.02_pt_tratado.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the NUMBERBATCH dimensions model\")\n\tnumberbatch_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# numberbatch_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\treturn word2vec_model, fasttext_model, ptlkb64_model, glove300_model, numberbatch_model",
"def build_word_embeddings(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n elif self.mode == \"test\":\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n else:\n word_emb = tf.get_variable(\n name=\"word_embedding\",\n shape=[self.config.vocab_size, self.config.word_embedding_dim],\n initializer=self.uniform_initializer)\n\n encode_emb1 = tf.nn.embedding_lookup(word_emb, self.encode_ids1)\n encode_emb2 = tf.nn.embedding_lookup(word_emb, self.encode_ids2)\n\n\n self.encode_emb1 = encode_emb1\n self.encode_emb2 = encode_emb2",
"def load_data(args):\n if args.use_mnist:\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,),\n (0.3081,))\n ])\n print(\"Loading vocab...\")\n with open(args.vocab_loc, 'rb') as f:\n vocab = pickle.load(f)\n print(\"number of unique tokens: %d\" % len(vocab))\n\n print(\"Get data loader...\")\n train_loader = get_mnist_loader(\n vocab=vocab, train=True, download=True,\n transform=transform,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=2\n )\n test_loader = get_mnist_loader(\n vocab=vocab, train=False, download=True,\n transform=transform,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=2\n\n )\n\n else:\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))\n ])\n print(\"Loading vocab...\")\n with open(args.vocab_loc, 'rb') as f:\n vocab = pickle.load(f)\n print(\"number of unique tokens: %d\" % len(vocab))\n\n print(\"Get data loader...\")\n train_loader = get_loader(\n root=args.images_loc, json=args.captions_loc, vocab=vocab, train=True,\n transform=transform,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=2\n )\n test_loader = get_loader(\n root=args.images_loc, json=args.captions_loc, vocab=vocab, train=False,\n transform=transform,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=2\n )\n\n # Input: word vector\n if args.embeddings_loc:\n print(\"\\nLoading word embeddings from %s\" % args.embeddings_loc)\n if 'google' in args.embeddings_loc.lower() and args.embeddings_loc.endswith('.bin'):\n w2v = KeyedVectors.load_word2vec_format(args.embeddings_loc, binary=True)\n emb_size = w2v.vector_size\n elif 'glove' in args.embeddings_loc.lower() and args.embeddings_loc.endswith('.txt'):\n w2v, emb_size = load_glove_vec(args.embeddings_loc)\n else:\n print(\"ERROR: unknown embedding file %s\" % args.embeddings_loc)\n return\n\n embeddings = np.random.uniform(-0.1, 0.1, size=(len(vocab), emb_size))\n for word, idx in vocab.word2idx.items():\n if word in w2v:\n embeddings[idx] = w2v[word]\n else:\n print(\"\\nCreating random word embeddings of size %dx%d\" % (len(vocab), args.embedding_size))\n embeddings = np.random.uniform(-0.1, 0.1, size=(len(vocab), args.embedding_size))\n\n return vocab, train_loader, test_loader, embeddings"
]
| [
"0.67582995",
"0.66326225",
"0.6497315",
"0.63573134",
"0.63262516",
"0.6318618",
"0.62867075",
"0.6257674",
"0.6143834",
"0.6126161",
"0.6125036",
"0.6078507",
"0.6047278",
"0.6044256",
"0.59935606",
"0.5948337",
"0.594831",
"0.5913711",
"0.59136695",
"0.5887492",
"0.58869636",
"0.58764595",
"0.58725256",
"0.5871641",
"0.5870882",
"0.5837683",
"0.581053",
"0.5791506",
"0.5788093",
"0.577826"
]
| 0.66390866 | 1 |
Generate a new ocpnplugins.xml. | def generate(sourcedir, destfile, version, date):
tree = ET.Element('plugins')
version_elem = ET.SubElement(tree, "version")
version_elem.text = version
date_elem = ET.SubElement(tree, "date")
date_elem.text = \
date if date else datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
for path in sorted(Path(sourcedir).glob("*.xml")):
try:
subtree = ET.parse(str(path))
except ET.ParseError as ex:
errprint("Error processing %s at line %d" % (path, ex.position[0]))
errprint("Skipping file")
continue
subtree.getroot().tag = 'plugin'
tree.append(subtree.getroot())
dom = minidom.parseString(ET.tostring(tree))
with open(destfile, "w") as f:
for line in dom.toprettyxml(indent=" ").split("\n"):
line = line.rstrip()
if line:
f.write(line + "\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build(self):\n self.output.write(\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n\"\"\")\n\n self.write_plugin_info()\n self.write_timers()\n self.write_custom_events()\n self.write_databases()\n self.write_resources()\n self.write_macroses()\n\n self.output.write(\"</plugin>\")",
"def generate(self, info):\n root = ET.Element(\"libs\")\n\n # Set target\n target = ET.SubElement(root, \"target\")\n target.text = info.target\n\n # Set time info\n time_start = ET.SubElement(root, \"start_time\")\n time_start.text = info.start_time.strftime(\"%H-%m-%Y %H:%M:%S\")\n\n time_end = ET.SubElement(root, \"end_time\")\n time_end.text = info.end_time.strftime(\"%H-%m-%Y %H:%M:%S\")\n\n # WordPress info\n wordpress = ET.SubElement(root, \"wordpress\")\n wordpress.set(\"current_version\", info.wordpress_info.current_version)\n wordpress.set(\"last_version\", info.wordpress_info.latest_version)\n\n # Set CVE\n if info.wordpress_info.vulnerabilities:\n cves = ET.SubElement(wordpress, \"cves\")\n for cve in info.wordpress_info.vulnerabilities:\n xml_cve = ET.SubElement(cves, \"cve\")\n xml_cve.text = cve\n\n # Plugins info\n plugins = ET.SubElement(root, \"plugins\")\n for plugin in info.plugins:\n xml_plugin = ET.SubElement(plugins, \"plugin\")\n xml_plugin.text = plugin.plugin_name\n\n xml_plugin.set(\"current_version\", plugin.current_version)\n xml_plugin.set(\"last_version\", plugin.latest_version)\n xml_plugin.set(\"url\", plugin.plugin_uri)\n xml_plugin.set(\"outdated\", \"Yes\" if plugin.is_outdated else \"No\")\n\n # Set CVE\n if plugin.cves:\n cves = ET.SubElement(xml_plugin, \"cves\")\n for cve in plugin.cves:\n xml_cve = ET.SubElement(cves, \"cve\")\n xml_cve.text = cve\n\n # Set exploits\n if plugin.cves:\n exploits = ET.SubElement(xml_plugin, \"exploits\")\n for exploit in plugin.exploits:\n xml_exploit = ET.SubElement(exploits, \"exploits\")\n xml_exploit.text = exploit\n\n return root",
"def create_gen_xml(self, out_file):\n\n param_list = []\n msg = []\n msg_type = []\n dep_node = []\n for line in self.full_ed_lines:\n param_list.append(line.text())\n dep_pkg = param_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n a, b = dep.split('/')\n msg.append(a)\n msg_type.append(b)\n f = open('../genkernel/templates/package_rosgen.xml')\n o = open(out_file, 'a')\n flag = 0\n while 1:\n line = f.readline()\n if not line: break\n for i in range(6):\n line = line.replace('[{0}]'.format(i), param_list[i])\n line = line.replace('[7]', param_list[7])\n if line.find('[6]') != -1:\n for dep in dep_pkg:\n line_dep = '\\t<depend>{0}</depend>\\n'.format(dep)\n o.write(line_dep)\n flag = 1\n elif line.find('[8]') != -1:\n for dep, tp in zip(msg, msg_type):\n line_dep = '\\t\\t<depend type=\"{1}\">{0}</depend>\\n'.format(dep, tp)\n o.write(line_dep)\n flag = 1\n elif line.find('<subscribers>') != -1:\n o.write('\\t\\t<subscribers>\\n')\n for sub in self.manager.wid.sub_list:\n o.write('\\t\\t\\t<sub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(sub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(sub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(sub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(sub['queue_size']))\n o.write('\\t\\t\\t</sub>\\n')\n o.write('\\t\\t</subscribers>\\n')\n flag = 1\n elif line.find('<publishers>') != -1:\n o.write('\\t\\t<publishers>\\n')\n for pub in self.manager.wid.pub_list:\n o.write('\\t\\t\\t<pub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(pub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(pub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(pub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(pub['queue_size']))\n o.write('\\t\\t\\t</pub>\\n')\n o.write('\\t\\t</publishers>\\n')\n flag = 1\n if flag == 0:\n o.write(line)\n else:\n flag = 0\n o.close()\n f.close()\n self.changed = False",
"def create_xml_plugin(xml_node):\n pluginxml = PluginXML(xml_node)\n plugin = find_plugin(pluginxml.get_class_name())\n if plugin is None:\n return None\n\n return plugin(xml_node)",
"def generate(self):\n py_gen = PythonGenerator([], \"\", self.plugin_stub.description)\n code_fragments = {\n \"autogenerated_module_path\": self.language.wrap_directory.path,\n \"autogenerated_module\": self.language.get_python_autogenerated_module().path,\n \"new_autogenerated_module\": self.language.get_python_new_autogenerated_module().path,\n \"fix_plugin\": py_gen.make_fix_plugin(),\n \"module_namespace\": \"_madz__{}\".format(str(self.plugin_stub.id.namespace).replace(\".\", \"__\")),\n \"init_path\": self.language.get_plugin_init().path,\n \"ctypes_wrapper_path\": self.language.get_python_ctypes_wrapper().path,\n \"module_hooks\": py_gen.make_module_hook(),\n \"type_accessors\" : py_gen.make_type_accessor(None),\n \"cleanup_code\": py_gen.make_cleanup_code(None),\n \"imported_functions\": \"\",\n \"in_structs\": \"\",\n \"dep_module_hooks\": \"\",\n \"dep_cleanup_code\": \"\",\n \"imp_module_hooks\": \"\",\n \"imp_cleanup_code\": \"\",\n \"typedefs\": \"\",\n \"functions\": py_gen.make_def_function_types(),\n \"out_structs\": py_gen.make_out_struct(),\n \"plugin_cname\": self.language.output_directory.file(\"{}.madz\".format(self.plugin_stub.id.namespace)).path,\n \"function_callbacks\": py_gen.make_function_callbacks(),\n \"function_stubs\": py_gen.make_function_stubs()\n }\n\n cstdlib = {\n \"windows\": \"'MSVCRT'\",\n \"unix\": \"'c'\",\n \"osx\": \"'c'\"\n }[config_target.get(OptionPlatformOperatingSystem)]\n\n self.prep()\n self._pre_header =\"#include \\\"Python.h\\\"\\n\"\n self._post_header = py_gen.make_c_header()\n\n c_wrapgen.WrapperGenerator.generate(self)\n\n c_source = py_gen.make_c_init(self.language.get_python_code_filename())\n c_source += py_gen.make_get_out_struct()\n c_source += py_gen.make_get_python_out_struct()\n c_source += py_gen.make_c_function_stubs()\n\n all_deps = self.plugin_stub.gen_recursive_loaded_depends()\n # depends plugins python\n for dep in all_deps:\n gen = PythonGenerator([], dep.id.namespace, dep.description)\n\n code_fragments[\"imported_functions\"] += gen.make_def_function_types()\n code_fragments[\"typedefs\"] += gen.make_typedefs()\n code_fragments[\"in_structs\"] += gen.make_out_struct()\n code_fragments[\"dep_module_hooks\"] += \" \" + gen.make_module_hook()\n code_fragments[\"dep_cleanup_code\"] += \"{}\\n{}\".format(gen.make_type_accessor(False), gen.make_cleanup_code(False))\n\n c_source += gen.make_get_in_struct()\n\n # imports plugins python\n for imp in self.plugin_stub.gen_required_loaded_imports():\n if not (imp in all_deps):\n gen = PythonGenerator([], imp.id.namespace, imp.description)\n\n code_fragments[\"imported_functions\"] += gen.make_def_function_types()\n code_fragments[\"typedefs\"] += gen.make_typedefs()\n code_fragments[\"in_structs\"] += gen.make_out_struct()\n code_fragments[\"imp_module_hooks\"] += \" \" + gen.make_module_hook()\n code_fragments[\"imp_cleanup_code\"] += \"{}\\n{}\".format(gen.make_type_accessor(True), gen.make_cleanup_code(True))\n\n c_source += gen.make_get_in_struct()\n\n # This plugins python\n code_fragments[\"typedefs\"] += py_gen.make_typedefs()\n\n module_string = self.autogenerated_module_template.format(cstdlib = cstdlib)\n with self.language.get_python_autogenerated_module().pyopen(\"w\") as f:\n f.write(module_string)\n\n with self.language.get_python_new_autogenerated_module().pyopen(\"w\") as f:\n f.write(module_string)\n\n with self.language.get_python_ctypes_wrapper().pyopen(\"w\") as f:\n f.write(self.ctypes_wrapper_template)\n\n with 
self.language.get_c_code_filename().pyopen(\"a\") as f:\n f.write(\"\\n{}\\n\".format(c_source))\n\n with self.language.get_python_code_filename().pyopen(\"w\") as f:\n f.write(self.py_template.format(**code_fragments))",
"def emit(self, ctx, modules, fd):\n if ctx.opts.debug or ctx.opts.verbose:\n print('JNC plugin starting')\n if not ctx.opts.ignore:\n for (epos, etag, _) in ctx.errors:\n if (error.is_error(error.err_level(etag)) and\n etag in ('MODULE_NOT_FOUND', 'MODULE_NOT_FOUND_REV')):\n self.fatal(\"%s contains errors\" % epos.top.arg)\n if (etag in ('TYPE_NOT_FOUND', 'FEATURE_NOT_FOUND',\n 'IDENTITY_NOT_FOUND', 'GROUPING_NOT_FOUND')):\n util.print_warning(msg=(etag.lower() + ', generated class ' +\n 'hierarchy might be incomplete.'), key=etag)\n else:\n util.print_warning(msg=(etag.lower() + ', aborting.'), key=etag)\n self.fatal(\"%s contains errors\" % epos.top.arg)\n\n # Sweep, adding included and imported modules, until no change\n module_set = set(modules)\n num_modules = 0\n while num_modules != len(module_set):\n num_modules = len(module_set)\n for module in list(module_set):\n imported = map(lambda x: x.arg, util.search(module, 'import'))\n included = map(lambda x: x.arg, util.search(module, 'include'))\n for (module_stmt, rev) in self.ctx.modules:\n if module_stmt in chain(imported, included):\n module_set.add(self.ctx.modules[(module_stmt, rev)])\n\n # Generate files from main modules\n for module in filter(lambda s: s.keyword == 'module', module_set):\n self.generate_from(module)\n\n # Generate files from augmented modules\n for aug_module in context.augmented_modules.values():\n self.generate_from(aug_module)\n\n # Print debug messages saying that we're done.\n if ctx.opts.debug or ctx.opts.verbose:\n if not self.ctx.opts.no_classes:\n print('Java classes generation COMPLETE.')\n if not self.ctx.opts.no_schema:\n print('Schema generation COMPLETE.')",
"def generate():\n PackCommandExecutor().pack()\n GenerateCommandExecutor().generate()",
"def pyang_plugin_init():\n plugin.register_plugin(OpenConfigPlugin())",
"def generate_xml(self, provisioning):\n ET.SubElement(self.root, 'generator').text = __revision__\n ET.SubElement(self.root, 'generated_at').text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n return self.provisioning2xml(provisioning)",
"def genConfig():\n\n cfg = open('/home/sevudan/Scripts/projects/topogen/result.cfg','w')\n template = getTemplate()\n G = topo.topology()\n gen_config_lo(G, cfg)\n # Get node from list nodes.\n for node in sorted(G.nodes):\n d = dict(G[node])\n hostname = node\n # Get attributes for node.\n peer = d.keys()\n for peer_node in peer:\n params = d.get(peer_node)\n conf = template.render(\n node=hostname,\n description = peer_node,\n ifd = params.get('ifd'),\n local_ifl = params.get('local_ifl'),\n peer_ifl = params.get('peer_ifl'),\n ifa = params.get('ip_address')\n )\n result = '{}{}'.format(conf,'\\n')\n cfg.write(result)\n cfg.close()",
"def _generate_objects_file(self):\n xmls = glob(f'{ROOT}/Annotations/**/*.xml', recursive=True)",
"def write_xosc(self, generated_xml):\n reparsed_xml = minidom.parseString(generated_xml).toprettyxml(indent=\" \")\n xosc_file = open(self._filepath, \"w\")\n xosc_file.write(reparsed_xml)\n xosc_file.close()\n\n msg = QMessageBox()\n if self._warning_message:\n msg.setIcon(QMessageBox.Warning)\n text = f\"Exported OpenSCENARIO file {self._filepath} has warnings!\\n\\n\"\n text += \"\\n\".join(self._warning_message)\n else:\n msg.setIcon(QMessageBox.Information)\n text = f\"Successfully exported OpenSCENARIO file to {self._filepath}\"\n msg.setText(text)\n msg.setWindowTitle(\"OpenSCENARIO Export\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.exec()",
"def generate():",
"def generate(self, context=None):\r\n outputfile = self.__get_output_filename()\r\n # For output type 'hcr', write the binary repository file\r\n if self.output_obj.type == 'hcr':\r\n self.logger.info(\"Generating binary repository to '%s'\" % outputfile)\r\n writer = HcrWriter()\r\n repo = self.output_obj.get_hcr_repository()\r\n data = writer.get_repository_bindata(repo)\r\n f = context.create_file(outputfile, mode='wb')\r\n #f = open(outputfile,'wb')\r\n try: f.write(data)\r\n finally: f.close()\r\n elif self.output_obj.type == 'header':\r\n self.logger.info(\"Generating header file to '%s'\" % outputfile)\r\n writer = HeaderWriter(outputfile, self.output_obj)\r\n writer.write(context)\r\n elif self.output_obj.type == None:\r\n # The HCRML file contains no <output> element, so no output should\r\n # be generated\r\n pass",
"def create_config(context, target_repoids, debug, test, tasks, on_aws=False):\n context.makedirs(os.path.dirname(DNF_PLUGIN_DATA_PATH), exists_ok=True)\n with context.open(DNF_PLUGIN_DATA_PATH, 'w+') as f:\n config_data = build_plugin_data(\n target_repoids=target_repoids, debug=debug, test=test, tasks=tasks, on_aws=on_aws\n )\n json.dump(config_data, f, sort_keys=True, indent=2)",
"def generate_nature(out):\n\n check_path(\n os.path.join(out, BASE),\n f\"The script should automatically generate this file. Something went wrong in the last step.\")\n\n nature = os.path.join(out, NATURE)\n shutil.copytree(\"evaluation/ablation/nature\", nature)\n shutil.copy(\"evaluation/src/utils.h\", nature)",
"def new_plugin(ctx, **defaults):\n from .quickstart import plugin_quickstart\n\n project = ctx.get_project(silent=True)\n plugin_quickstart(defaults, project=project)",
"def _package_plugins(ctx):\n print(\"\\n\\n-- Creating Zip Files \\n\")\n\n project_dir = Path(__file__).parent\n plugins_projects = [\n x for x in (project_dir / \"build/build_directory_for_tests/\").iterdir() if x.is_dir()\n ]\n artifacts_dir = project_dir / \"build/artifacts\"\n\n plugins_zip = project_dir / \"build/plugin_zip\"\n if plugins_zip.exists():\n shutil.rmtree(plugins_zip)\n\n plugins_zip.mkdir()\n\n for project in plugins_projects:\n plugins_dirs = [\n x for x in (project / \"plugin\").iterdir() if x.is_dir() and (x / \"assets\").exists()\n ]\n hm_generator = HookManGenerator(\n hook_spec_file_path=project_dir / f\"tests/plugins/{project.name}/hook_specs.py\"\n )\n\n for plugin in plugins_dirs:\n (plugin / \"artifacts\").mkdir()\n if sys.platform == \"win32\":\n shutil.copy2(src=artifacts_dir / f\"{plugin.name}.dll\", dst=plugin / \"artifacts\")\n else:\n shutil.copy2(src=artifacts_dir / f\"lib{plugin.name}.so\", dst=plugin / \"artifacts\")\n\n hm_generator.generate_plugin_package(\n package_name=plugin.name, plugin_dir=plugin, dst_path=plugins_zip\n )",
"def _plugin_create(cls, plugin_dir):\n plugin_path = os.path.join(settings.PLUGINS_PATH, plugin_dir,\n 'metadata.yaml')\n try:\n plugin_metadata = cls._parse_yaml_file(plugin_path)\n Plugin.create(plugin_metadata)\n except Exception as e:\n logger.error(\"cannot create plugin {0} from FS. Reason: {1}\"\n .format(plugin_dir, str(e)))",
"def generate_xml(self, locations):\n\n ET.SubElement(self.root, 'generator').text = __revision__\n ET.SubElement(self.root, 'generated_at').text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n xmlroot = self.root\n kernel = Kerneladapter()\n\n for locname in locations:\n xml_location = ET.SubElement(xmlroot, 'location')\n location = kernel.location_info(locname)\n ET.SubElement(xml_location, \"location\").text = unicode(locname)\n ET.SubElement(xml_location, \"height\").text = unicode(location['height'])\n ET.SubElement(xml_location, \"attributes\").text = unicode(location['attributes'])\n ET.SubElement(xml_location, \"floorlevel\").text = unicode(location['floorlevel'])\n ET.SubElement(xml_location, \"preference\").text = unicode(location['preference'])\n ET.SubElement(xml_location, \"info\").text = unicode(location['info'])\n ET.SubElement(xml_location, \"reserved_for\").text = unicode(location['reserved_for'])\n\n for mui in location['allocated_by']:\n unit = kernel.unit_info(mui)\n xml_unit = ET.SubElement(xml_location, \"unit\")\n ET.SubElement(xml_unit, \"mui\").text = unicode(unit['mui'])\n ET.SubElement(xml_unit, \"quantity\").text = unicode(unit['quantity'])\n ET.SubElement(xml_unit, \"artnr\").text = unicode(unit['product'])\n ET.SubElement(xml_unit, \"height\").text = unicode(unit['height'])\n ET.SubElement(xml_unit, \"pick_quantity\").text = unicode(unit['pick_quantity'])\n ET.SubElement(xml_unit, 'created_at').text = unit['created_at'].strftime('%Y-%m-%d %H:%M:%S')\n ET.SubElement(xml_unit, \"movements\").text = unicode(unit['movements'])\n ET.SubElement(xml_unit, \"picks\").text = unicode(unit['picks'])\n ET.SubElement(xml_unit, \"attributes\").text = unicode(unit['attributes'])\n try:\n product = produktpass.models.Product.objects.get(artnr=unit['product'])\n ET.SubElement(xml_unit, \"product_name\").text = unicode(product.name)\n except produktpass.models.Product.DoesNotExist:\n ET.SubElement(xml_unit, \"product_name\").text = '???'\n\n return xmlroot",
"def create_conf_xml(self):\n path = os.path.join(\n self.buildout['buildout']['parts-directory'],\n self.name)\n if not os.path.isdir(path):\n os.makedirs(path)\n\n xml_path = os.path.join(path, 'uwsgi.xml')\n\n conf = \"\"\n for key, value in self.conf.items():\n if value.lower() in ('true', 'on', 'yes'):\n conf += \"<%s/>\\n\" % key\n elif value and value.lower() not in ('false', 'off', 'yes'):\n conf += \"<%s>%s</%s>\\n\" % (key, value, key)\n\n\n requirements, ws = self.egg.working_set()\n eggs_paths = [dist.location for dist in ws]\n eggs_paths.extend(self.get_extra_paths())\n # order preserving unique\n unique_egg_paths = []\n for p in eggs_paths:\n if p not in unique_egg_paths:\n unique_egg_paths.append(p)\n\n for path in map(realpath, unique_egg_paths):\n conf += \"<pythonpath>%s</pythonpath>\\n\" % path\n\n f = open(xml_path, 'w')\n f.write(\"<uwsgi>\\n%s</uwsgi>\" % conf)\n f.close()\n return xml_path",
"def create_components_h(self, sourceRoot, outputRoot=None):\n if outputRoot is None: outputRoot = sourceRoot\n hardcoreDir = os.path.join(sourceRoot, 'modules', 'hardcore')\n operaDir = os.path.join(hardcoreDir, 'opera')\n changed = util.readTemplate(os.path.join(operaDir, 'components_template.h'),\n os.path.join(outputRoot, 'modules', 'hardcore', 'opera', 'components.h'),\n ComponentTemplateActionHandler(self.components()))\n if sourceRoot == outputRoot:\n util.updateModuleGenerated(hardcoreDir, [\"opera/components.h\"])\n return changed",
"def create_plugin_files(config: Config) -> Config:\n c4d_symbols_file = os.path.join(\n config.destination,\n \"res\",\n \"c4d_symbols.h\"\n )\n c4d_symbols_content = \"\"\"enum\n{\n\n};\"\"\"\n\n if not os.path.isfile(c4d_symbols_file):\n assert_directories(c4d_symbols_file, True)\n\n with open(c4d_symbols_file, \"w\") as f:\n f.write(c4d_symbols_content)\n\n c4d_strings_file = os.path.join(\n config.destination,\n \"res/strings_us\",\n \"c4d_strings.str\"\n )\n\n if not os.path.isfile(c4d_strings_file):\n assert_directories(c4d_strings_file, True)\n\n with open(c4d_strings_file, \"w\") as f:\n f.write(\"\")\n\n return config",
"def createObject(self, *args):\n return _libsbml.CompSBasePlugin_createObject(self, *args)",
"def create_osm_conf(self, stage_dir=None):\n result = INI_TEMPLATE.format(\n points_attributes=','.join(self.points),\n lines_attributes=','.join(self.lines),\n multipolygons_attributes=','.join(self.polygons)\n )\n with open(self.output_ini, 'w') as f:\n f.write(result)\n return self.output_ini",
"def generate(self):\n self._open_file()\n # copied from GenerateCSPEC.py\n self._write_header_and_defaults()\n self._write_source()\n self._write_sample()\n\n self._write_all_components()\n self._write_mantle_module()\n self._write_segment()\n self._write_all_ids()\n self._write_footer()\n self._close_file()",
"def create_hardcore_opera_inc(self, sourceRoot, outputRoot=None):\n if outputRoot is None: outputRoot = sourceRoot\n hardcoreDir = os.path.join(sourceRoot, \"modules\", \"hardcore\")\n operaDir = os.path.join(hardcoreDir, \"opera\")\n template = os.path.join(operaDir, \"opera_template.inc\")\n hardcore_opera_inc = os.path.join(outputRoot, \"modules\", \"hardcore\", \"opera\", \"hardcore_opera.inc\")\n changed = util.readTemplate(template, hardcore_opera_inc,\n self.getTemplateActionHandler(sourceRoot))\n if sourceRoot == outputRoot:\n util.updateModuleGenerated(hardcoreDir, [\"opera/hardcore_opera.inc\"])\n return changed",
"def create_modules(self):\n self.nmos = ptx(width=self.nmos_size,\n mults=self.nmos_mults,\n tx_type=\"nmos\")\n self.add_mod(self.nmos)\n\n self.pmos = ptx(width=self.pmos_size,\n mults=self.pmos_mults,\n tx_type=\"pmos\")\n self.add_mod(self.pmos)",
"def create_components_inc(self, sourceRoot, outputRoot=None):\n if outputRoot is None: outputRoot = sourceRoot\n hardcoreDir = os.path.join(sourceRoot, 'modules', 'hardcore')\n componentDir = os.path.join(hardcoreDir, 'component')\n changed = util.readTemplate(os.path.join(componentDir, 'OpComponentCreate_template.inc'),\n os.path.join(outputRoot, 'modules', 'hardcore', 'component', 'OpComponentCreate.inc'),\n ComponentTemplateActionHandler(self.components()))\n if sourceRoot == outputRoot:\n util.updateModuleGenerated(hardcoreDir, [\"component/OpComponentCreate.inc\"])\n return changed",
"def custom_package_xml_generator(directory, packagename=None, version='45.0', filename='package.xml'):\n\n METADATA_TYPE = {\n 'applications':'CustomApplication', 'aura':'AuraDefinitionBundle', 'classes':'ApexClass', 'customPermissions':'CustomPermission', \n 'flexipages':'FlexiPage', 'flows':'Flow', 'globalValueSets':'GlobalValueSet', 'labels':'CustomLabels', 'layouts':'Layout',\n 'lwc': 'LightningComponentBundle', 'objects':'CustomObject', 'pages':'ApexPage', 'permissionsets':'PermissionSet', 'profiles':'Profile',\n 'staticresources':'StaticResource', 'tabs':'CustomTab', 'triggers':'ApexTrigger', 'contentassets':'ContentAsset', 'pathAssistants':'PathAssistant',\n 'quickActions':'QuickAction', 'remoteSiteSettings':'RemoteSiteSetting', 'workflows':'Workflow', 'dashboards':'Dashboard', 'reports':'Report',\n 'cspTrustedSites':'CspTrustedSite',\n }\n\n \"\"\"\n Non-implemented Metadata:\n 'ApexComponent', 'CustomMetadata' (needs custom manipulation), 'CustomObjectTranslation', 'DuplicateRule', \n 'FlowCategory', 'GlobalValueSetTranslation', 'MatchingRules',\n \"\"\"\n #read directory structure\n\n mdtypedirs = os.listdir(directory)\n\n nested_mdt_object = ['ValidationRule', 'CompactLayout', 'ListView', 'SharingReason', 'RecordType']\n nested_mdt_workflow = ['WorkflowFieldUpdate', 'WorkflowKnowledgePublish', 'WorkflowTask', 'WorkflowAlert', 'WorkflowSend', 'WorkflowOutboundMessage', 'WorkflowRule']\n\n # start our xml structure\n root = xml.Element('Package')\n root.set('xmlns','http://soap.sforce.com/2006/04/metadata')\n\n for mdtype in mdtypedirs:\n # create child node for each type of component\n if mdtype in METADATA_TYPE.keys():\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = str(METADATA_TYPE[mdtype])\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n \n if mdtype == 'objects':\n for nest_mdtyp in nested_mdt_object:\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = nest_mdtyp\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n if mdtype == 'workflows':\n for nest_mdtyp in nested_mdt_workflow:\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = nest_mdtyp\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n #Custom behavior for custom labels\n if mdtype == 'labels':\n etype = xml.SubElement(root, 'types')\n ename = xml.SubElement(etype, 'name')\n ename.text = 'CustomLabel'\n emember = xml.SubElement(etype, 'members')\n emember.text = str('*')\n\n # add the final xml node package.api_version\n eversion = xml.SubElement(root, 'version')\n eversion.text = str(version)\n\n #package name\n if packagename != None:\n efname = xml.SubElement(root, 'fullName')\n efname.text = str(packagename)\n\n #pretty format for xml\n xmlstring = xml.tostring(root)\n reparsed = minidom.parseString(xmlstring)\n prettyxml = reparsed.toprettyxml(indent=' ', newl='\\n', encoding='UTF-8')\n \n #generate xml file from string\n try:\n with open(os.path.join(directory, filename), \"bw\") as xml_file:\n xml_file.write(prettyxml)\n except IOError:\n pass"
]
| [
"0.66747826",
"0.57758486",
"0.5733098",
"0.5577461",
"0.5462274",
"0.5429898",
"0.53631663",
"0.52749896",
"0.52611405",
"0.5177107",
"0.51359594",
"0.51344335",
"0.5099594",
"0.50879645",
"0.50810367",
"0.50763667",
"0.50714844",
"0.5053349",
"0.50524765",
"0.50442195",
"0.5025927",
"0.50247085",
"0.49977842",
"0.49165854",
"0.49127305",
"0.4906832",
"0.49047717",
"0.49033654",
"0.4898577",
"0.4868428"
]
| 0.6637632 | 1 |
Remove automations for a Tasmota device. | async def async_remove_automations(hass, device_id):
await device_trigger.async_remove_triggers(hass, device_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def AptUninstall(vm):\n remove_str = 'sudo apt-get --purge autoremove -y '\n for package in APT_PACKAGES:\n vm.RemoteCommand(remove_str + package)",
"def clear_all_devices():\n adapter = get_adapter()\n for key in devices_by_adr.keys():\n device = get_device(key)\n try:\n adapter.RemoveDevice(device) \n except DBusException:\n print(\"could not remove\", device)",
"async def async_device_removed(event):\n if event.data[\"action\"] != \"remove\":\n return\n await async_remove_automations(hass, event.data[\"device_id\"])",
"def KillAllAnts(cls):\n cls.antArray.clear()",
"def cleanup_aai(cls):\n logger.info(\"####################### Start to clean up AAI settings\")\n aai = Customer.get_by_global_customer_id(\"5GCustomer\")\n aai.delete()",
"def delete_unavailable_devices():\n _run_command('delete unavailable')",
"def rmMot(self, mot):\n for m in self.motors:\n if m.nom == mot:\n self.motors.remove(m)\n else:\n print('No motor named ' + mot + ' in simulateur ' + self.nom + '.')",
"def delete_device(self):\n # PROTECTED REGION ID(AsyncTabata.delete_device) ENABLED START #\n # PROTECTED REGION END # // AsyncTabata.delete_device",
"def OnRemoveAutomation(self, event, automation):\n\n self.app.RemoveAutomation(automation)\n for child in self.GetChildren():\n child.Destroy()\n\n self.Draw()",
"def remove_device(self, path):\n pass",
"def removeDevice(self, node, fullDeviceName):",
"def AptUninstall(vm):\n _Uninstall(vm)",
"def delete_all(self):\n for tag in self._segments['APP1'].get_tag_list():\n try:\n self.__delattr__(tag)\n except AttributeError:\n warnings.warn(\"could not delete tag \" + tag, RuntimeWarning)",
"def test_gwservice_deletedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False",
"def eraseAll(self): # remove all robots\n\t\tself.__robotList = []",
"def turnOffMotors(self) -> None:\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)",
"def _delete_sai_test_cases(ptfhost):\n logger.info(\"Delete SAI tests cases\")\n ptfhost.file(path=\"{0}\".format(SAI_TEST_CASE_DIR_ON_PTF), state=\"absent\")",
"def _removeSpecs(self):\n self.specGenerator.removeSpecs()",
"def clean(self):\n os.remove(self.apk_path)",
"def remove_device(hass: HomeAssistant, mac: str):\n registry = dr.async_get(hass)\n device = registry.async_get_device({(DOMAIN, mac)}, None)\n if device:\n registry.async_remove_device(device.id)",
"def unpair(self):\n xcrun.simctl.unpair_devices(self)",
"def remove_robots(): #py:remove_robots\n RUR._remove_robots_()",
"def turnOffMotors(self):\n self.mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)",
"def test_delete_device(self):\n pass",
"def test_delete_device(self):\n pass",
"def test_duo_application_delete(self):\n pass",
"def remove_many_descriptors(self, uuids):",
"def remove_hero(apps, schema_editor):\n pass",
"def rm(cli):\n __check_in_autonotes_dir()\n\n # File args\n files = cli.config.rm.file\n\n # Remove the files\n __rm(files)",
"def __del__(self):\n self.DcMotor.run(Adafruit_MotorHAT.RELEASE) # changed rightMotor to DcMotor , RFMH_2019_02_28\n del self.motorhat"
]
| [
"0.60911405",
"0.60012424",
"0.5825712",
"0.5750088",
"0.56554925",
"0.56551033",
"0.5607625",
"0.5587846",
"0.55782807",
"0.55749226",
"0.5565252",
"0.5552191",
"0.5530161",
"0.5520082",
"0.55070865",
"0.5495222",
"0.5493418",
"0.5478428",
"0.5467731",
"0.5467223",
"0.5455837",
"0.5434102",
"0.5431828",
"0.54232633",
"0.54232633",
"0.54220974",
"0.54076827",
"0.54017365",
"0.5399396",
"0.5372945"
]
| 0.6749313 | 0 |
Handle the removal of a device. | async def async_device_removed(event):
if event.data["action"] != "remove":
return
await async_remove_automations(hass, event.data["device_id"]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def _handle_device_remove(hass: HomeAssistant):\n\n async def device_registry_updated(event: Event):\n if event.data['action'] != 'update':\n return\n\n registry = hass.data['device_registry']\n hass_device = registry.async_get(event.data['device_id'])\n\n # check empty identifiers\n if not hass_device or not hass_device.identifiers:\n return\n\n identifier = next(iter(hass_device.identifiers))\n\n # handle only our devices\n if identifier[0] != DOMAIN or hass_device.name_by_user != 'delete':\n return\n\n # remove from Mi Home\n for gw in hass.data[DOMAIN].values():\n if not isinstance(gw, Gateway3):\n continue\n gw_device = gw.get_device(identifier[1])\n if not gw_device:\n continue\n gw.debug(f\"Remove device: {gw_device['did']}\")\n gw.miio.send('remove_device', [gw_device['did']])\n break\n\n # remove from Hass\n registry.async_remove_device(hass_device.id)\n\n hass.bus.async_listen('device_registry_updated', device_registry_updated)",
"def remove_device(self, path):\n pass",
"def removeDevice(self, node, fullDeviceName):",
"def OnDeviceRemoval(self, serial_number):\r\n cam_list = self.system.GetCameras()\r\n count = cam_list.GetSize()\r\n print('System event handler:')\r\n print('\\tDevice %i was removed from the system.' % serial_number)\r\n print('\\tThere %s %i %s on the system.' % ('is' if count == 1 else 'are',\r\n count,\r\n 'device' if count == 1 else 'devices'))",
"def remove_device(request, pk):\n device = get_object_or_404(Laptop, pk=pk)\n context = {}\n if request.method == 'POST':\n form = RemovalForm(request.POST)\n if form.is_valid():\n device.mdm_enrolled = False\n device.serial = None\n device.asset_tag = None\n device.last_ip = None\n device.last_checkin = None\n device.save()\n template = loader.get_template('default.html')\n return HttpResponse(template.render({'title': 'Device Removed',\n 'message': 'This device is no longer associated with the MDM.',\n 'EXIT_BTN': True, 'EXIT_URL': reverse(\"mdm:list\"), 'NO_FOOT': True},\n request))\n else:\n context['form'] = RemovalForm(request.POST)\n else:\n if device.serial == 'DISCONNECTED':\n context['form'] = RemovalForm(uninstalled=True)\n else:\n context['form'] = RemovalForm()\n return render(request, 'form_crispy.html', context)",
"def delete_device(self):\n # PROTECTED REGION ID(AsyncTabata.delete_device) ENABLED START #\n # PROTECTED REGION END # // AsyncTabata.delete_device",
"def test_delete_device(self):\n pass",
"def test_delete_device(self):\n pass",
"def remove_device(self, device, port=161):\n try:\n self.devices = self.devices.remove(device)\n except ValueError:\n pass",
"def device_disconnect(self):\n pass",
"def delete_device(self, device: Device) -> None:\n self._devices.pop(device.name, None)",
"def handle_remove(self):\r\n self.del_common()",
"def delete_device(device):\n if device in devices.list():\n devices.delete(device)\n return '', 204\n else:\n raise BadRequest('The given device name does not exist')",
"def remove(self, device_id):\n with self.lock:\n if device_id in self.devices:\n del self.devices[device_id]",
"def delete_device(self):\n # PROTECTED REGION ID(SdpMasterLeafNode.delete_device) ENABLED START #\n # PROTECTED REGION END # // SdpMasterLeafNode.delete_device",
"def delete_device(self):\n # PROTECTED REGION ID(CbfSubarray.delete_device) ENABLED START #\n\n pass\n # PROTECTED REGION END # // CbfSubarray.delete_device",
"def Remove(self, event):\n pass",
"def delete_device(self):\n # PROTECTED REGION ID(CspSubElementSubarray.delete_device) ENABLED START #\n # PROTECTED REGION END # // CspSubElementSubarray.delete_device",
"def onRemove(self):\n pass",
"def onRemove(self):\n pass",
"def delete_device(cls, device_id, token):\n\n tenant = init_tenant_context(token, db)\n orm_device = assert_device_exists(device_id)\n data = serialize_full_device(orm_device, tenant)\n\n kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier)\n kafka_handler_instance.remove(data, meta={\"service\": tenant})\n\n db.session.delete(orm_device)\n db.session.commit()\n\n results = {'result': 'ok', 'removed_device': data}\n return results",
"def ProcessUnregister(self, msg):\n # Check the management token.\n token, response = self.CheckToken()\n if not token:\n return response\n\n # Unregister the device.\n self.server.UnregisterDevice(token['device_token'])\n\n # Prepare and send the response.\n response = dm.DeviceManagementResponse()\n response.unregister_response.CopyFrom(dm.DeviceUnregisterResponse())\n\n return (200, response)",
"def remove_device(hass: HomeAssistant, mac: str):\n registry = dr.async_get(hass)\n device = registry.async_get_device({(DOMAIN, mac)}, None)\n if device:\n registry.async_remove_device(device.id)",
"def test_delete_device_by_id(self):\n pass",
"def remove_this_device_from_input(self, loggly_input):\n\n path = 'inputs/%s/removedevice/' % loggly_input.id\n\n response = self._loggly_delete(path)\n\n return \"%s:%s\" % (response.status_code, response.text)",
"async def on_terncy_svc_remove(event):\n dev_id = event.data[\"dev_id\"]\n _LOGGER.info(\"terncy svc remove %s\", dev_id)\n if not tern.is_connected():\n await tern.stop()",
"def device_event(observer, device):\n if (device.action == \"add\"):\n print(\"conectado\")\n name = device.sys_name\n print(name)\n print(name[len(name) - 4])\n if(name[len(name) - 4] == \":\"):\n print(\"device mala\")\n else:\n time.sleep(5)\n try:\n with open(\"/media/usb0/LABSD.txt\", \"r\") as f:\n data = f.readlines()\n except IOError:\n print('cannot open')\n else:\n dataprocess(data)\n f.close()\n elif (device.action == \"remove\"):\n print(\"desconectado\")\n else:\n print(\"error\")",
"def process_IN_DELETE(self, event):",
"def _handle_delete(self):\n\n cookbookapp_functions.delete_chosen_recipe()\n self._handle_return()",
"def __del__(self):\n if hasattr(self, 'dev'):\n kernel32.CloseHandle(self.dev)"
]
| [
"0.807616",
"0.71581817",
"0.70584047",
"0.69244736",
"0.67771333",
"0.6472483",
"0.64140743",
"0.64140743",
"0.6394929",
"0.63806874",
"0.63092977",
"0.6286962",
"0.62104803",
"0.6186973",
"0.61726636",
"0.6109445",
"0.61076254",
"0.6100079",
"0.60897523",
"0.60897523",
"0.60718846",
"0.60363597",
"0.59737873",
"0.5963931",
"0.5961095",
"0.594921",
"0.5933807",
"0.5929915",
"0.5928864",
"0.5905212"
]
| 0.75656223 | 1 |
Discover and add a Tasmota device automation. | async def async_discover(tasmota_automation, discovery_hash):
if tasmota_automation.automation_type == AUTOMATION_TYPE_TRIGGER:
await device_trigger.async_setup_trigger(
hass, tasmota_automation, config_entry, discovery_hash
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_device(self):\n\n pass",
"def flash_tasmota(self, flash_mode, serial_port):\n # Make sure device is tasmota\n if self.software != 'tasmota':\n print('{f_name} is {software}, not tasmota'.format(**self))\n return(False)\n if current_tasmota_version != get_tasmota_version():\n print('{RED}Error: Tasmota version mismatch expected: \"{expected}\", got \"{current}\"{NC}'.format(**colors, expected=current_tasmota_version, current= get_tasmota_version()))\n return(False)\n self.write_tasmota_config()\n\n correctPIO = os.path.join(espqdir, 'platformio_override.ini')\n tasmotaPIO = os.path.join(tasmota_dir, 'platformio_override.ini')\n if not os.path.exists(tasmotaPIO) or not cmp(correctPIO, tasmotaPIO):\n copyfile(correctPIO, tasmotaPIO)\n\n\n os.chdir(tasmota_dir)\n\n pio_call = 'platformio run -e tasmota-{flash_mode} -t upload'.format(flash_mode=flash_mode);\n\n # if we're flashing via wifi or serial port is specified to us,\n # specify it to pio\n if flash_mode == 'wifi' or serial_port:\n pio_call += ' --upload-port {port}'\n\n if flash_mode == 'wifi':\n self.flashing_notice(flash_mode, self.ip_addr)\n # If we don't know the IP address, ask device\n if not 'ip_addr' in self or not self.ip_addr:\n print('No IP address for this device in the config.'\n 'Querying device...')\n self.query_tas_status()\n if 'ip' in self.reported:\n print('{name} is online at {ip}'.format(name=self.f_name,\n ip=self.reported['ip']))\n self.ip_addr = self.reported['ip']\n else:\n print('{f_name} did not respond at {c_topic}. IP address '\n 'unavailable. Skipping device...'.format(**self))\n return(False)\n pio_call = pio_call.format(port=(self.ip_addr + '/u2'))\n elif flash_mode == 'serial':\n self.flashing_notice(flash_mode, serial_port)\n pio_call = pio_call.format(port=serial_port)\n print('{BLUE}{f_name}\\'s MQTT topic is '\n '{topic}{NC}'.format(**colors, **self))\n print(pio_call)\n flash_result = call(pio_call, shell=True)\n return(True if flash_result == 0 else False)",
"def async_add_devices_discovery(hass, discovery_info, async_add_devices):\n items = discovery_info[CONF_ITEMS]\n for item in items:\n async_add_devices([AmpioSwitch(hass, item)])",
"def flash_tasmota(self):\n # Make sure device is tasmota\n if self.software != 'tasmota':\n print('{f_name} is {software}, not tasmota'.format(**self))\n return(False)\n if current_tasmota_version != get_tasmota_version():\n print('{RED}Error: Tasmota version mismatch{NOCOLOR}'.format(**colors))\n return(False)\n self.write_tasmota_config()\n\n correctPIO = os.path.join(espqdir, 'platformio.ini')\n tasmotaPIO = os.path.join(tasmotadir, 'platformio.ini')\n if filecmp.cmp(correctPIO, tasmotaPIO) == False:\n shutil.copyfile(correctPIO, tasmotaPIO)\n\n os.chdir(tasmotadir)\n pio_call = 'platformio run -e {environment} -t upload --upload-port {port}'\n if self.flash_mode == 'wifi':\n pio_call = pio_call.format(environment='sonoff-wifi', port=(self.ip_addr + '/u2'))\n print(('{BLUE}Now flashing {module} {f_name} with {software} via '\n '{flash_mode} at {ip_addr}{NOCOLOR}'.format(**colors, **self)))\n elif self.flash_mode == 'serial':\n pio_call = pio_call.format(environment='sonoff-serial', port=self.serial_port)\n print(('{BLUE}Now flashing {module} {f_name} with {software} via '\n '{flash_mode} at {serial_port}{NOCOLOR}'.format(**colors, **self)))\n print('{BLUE}{f_name}\\'s MQTT topic is {base_topic}/{topic}{NOCOLOR}'.format(**colors, **self))\n print(pio_call)\n flash_result = call(pio_call, shell=True)\n os.chdir(espqdir)\n return(True if flash_result == 0 else False)",
"def device_discovery(endless):\r\n click.echo(\"start device discovery ...\")\r\n _device_discovery(endless)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n from pybotvac import Account\n\n try:\n auth = Account(config[CONF_USERNAME], config[CONF_PASSWORD])\n except HTTPError:\n _LOGGER.error(\"Unable to connect to Neato API\")\n return False\n\n dev = []\n for robot in auth.robots:\n for type_name in SWITCH_TYPES:\n dev.append(NeatoConnectedSwitch(robot, type_name))\n add_devices(dev)",
"def addDevice(self, node, fullDeviceName, device):",
"def setup(port, baud = int('9600'), apn = 'internet.movistar.com.co'):\n try:\n module = serial.Serial('/dev/tty{}'.format(port.upper(), '{}'.format(baud)))\n time.sleep(0.1)\n if module.isOpen():\n print ('Serial Port Available')\n else:\n print ('Serial Port not Available')\n except serial.SerialException:\n print ('Something goes wrong')\n module.close()\n try:\n module.write('AT+CGATT=1\\r\\n'.encode())\n time.sleep(0.01)\n module.write(('AT+CGDCONT=1,\\\"IP\\\",\\\"{}\\\"\\r\\n').format(apn).encode()) \n time.sleep(0.01)\n module.write(('AT+CGSOCKCONT=1,\\\"IP\\\",\\\"{}\\\"\\r\\n').format(apn).encode())\n module.write(('AT+CSOCKSETPN=1\\r\\n').encode())\n time.sleep(0.01)\n module.write(('AT+CGPSURL=\\\"supl.google.com:7276\\\"\\r\\n').encode())\n time.sleep(0.1)\n module.write(('AT+CGPSSSL=1\\r\\n').encode())\n time.sleep(0.1)\n #module.write(('AT+CGPS=1,3\\r\\n').encode())\n #time.sleep(0.2)\n #if _valid_gps(module):\n # print ('GPS configurated')\n #else:\n # print ('GPS not configurated')\n print ('SIM53XX Configurated!')\n except serial.SerialException:\n print ('Something failed during configuration\\rPlase try again...')\n\n return module",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n bt_device_id: int = config[CONF_BT_DEVICE_ID]\n\n beacons: dict[str, dict[str, str]] = config[CONF_BEACONS]\n devices: list[EddystoneTemp] = []\n\n for dev_name, properties in beacons.items():\n namespace = get_from_conf(properties, CONF_NAMESPACE, 20)\n instance = get_from_conf(properties, CONF_INSTANCE, 12)\n name = properties.get(CONF_NAME, dev_name)\n\n if instance is None or namespace is None:\n _LOGGER.error(\"Skipping %s\", dev_name)\n continue\n\n devices.append(EddystoneTemp(name, namespace, instance))\n\n if devices:\n mon = Monitor(hass, devices, bt_device_id)\n\n def monitor_stop(event: Event) -> None:\n \"\"\"Stop the monitor thread.\"\"\"\n _LOGGER.info(\"Stopping scanner for Eddystone beacons\")\n mon.stop()\n\n def monitor_start(event: Event) -> None:\n \"\"\"Start the monitor thread.\"\"\"\n _LOGGER.info(\"Starting scanner for Eddystone beacons\")\n mon.start()\n\n add_entities(devices)\n mon.start()\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, monitor_start)\n else:\n _LOGGER.warning(\"No devices were added\")",
"async def async_setup_entry(hass, config_entry):\n\n async def async_device_removed(event):\n \"\"\"Handle the removal of a device.\"\"\"\n if event.data[\"action\"] != \"remove\":\n return\n await async_remove_automations(hass, event.data[\"device_id\"])\n\n async def async_discover(tasmota_automation, discovery_hash):\n \"\"\"Discover and add a Tasmota device automation.\"\"\"\n if tasmota_automation.automation_type == AUTOMATION_TYPE_TRIGGER:\n await device_trigger.async_setup_trigger(\n hass, tasmota_automation, config_entry, discovery_hash\n )\n\n hass.data[\n DATA_REMOVE_DISCOVER_COMPONENT.format(\"device_automation\")\n ] = async_dispatcher_connect(\n hass,\n TASMOTA_DISCOVERY_ENTITY_NEW.format(\"device_automation\"),\n async_discover,\n )\n hass.data[DATA_UNSUB].append(\n hass.bus.async_listen(EVENT_DEVICE_REGISTRY_UPDATED, async_device_removed)\n )",
"async def create_automation(self, automation: Automation) -> UUID:\n if self.server_type != ServerType.CLOUD:\n raise RuntimeError(\"Automations are only supported for Prefect Cloud.\")\n\n response = await self._client.post(\n \"/automations/\",\n json=automation.dict(json_compatible=True),\n )\n\n return UUID(response.json()[\"id\"])",
"def test_usb_device(self):\n candidate = Ftdi.get_identifiers(self.ftdi_url)\n usbdev = UsbTools.get_device(candidate[0])\n i2c = I2cController()\n i2c.configure(usbdev, interface=candidate[1], frequency=100e3)\n eeprom = SerialEepromManager.get_from_controller(i2c, '24AA32A', 0x50)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n import sharp_aquos_rc\n\n name = config.get(CONF_NAME)\n port = config.get(CONF_PORT)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n if discovery_info:\n _LOGGER.debug('%s', discovery_info)\n vals = discovery_info.split(':')\n if len(vals) > 1:\n port = vals[1]\n\n host = vals[0]\n remote = sharp_aquos_rc.TV(host,\n port,\n username,\n password)\n add_devices([SharpAquosTVDevice(name, remote)])\n return True\n\n host = config.get(CONF_HOST)\n remote = sharp_aquos_rc.TV(host,\n port,\n username,\n password)\n\n add_devices([SharpAquosTVDevice(name, remote)])\n return True",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n code = config.get(CONF_CODE)\n username = config.get(CONF_USERNAME)\n password = config.get(CONF_PASSWORD)\n\n add_devices([SimpliSafeAlarm(name, username, password, code)])",
"def _setup_device(\n id,\n baud_rate,\n read_termination=\"\\n\",\n write_termination=\"\\n\",\n timeout=None,\n wait_after_connect=0.0,\n ):\n # Get a handle to the instrument\n rm = pyvisa.ResourceManager(\"@py\")\n\n logger.debug(f\"Devices: {rm.list_resources()}\")\n\n # pyvisa-py doesn't have \"COM\" aliases for ASRL serial ports, so convert\n regex_match = re.match(r\"^com(\\d{1,3})$\", id.lower())\n if regex_match:\n id = f\"ASRL{regex_match[1]}\"\n\n logger.debug(f\"Connecting to : {id}\")\n\n instr = rm.open_resource(id)\n\n logger.debug(f\"Connection: {instr}\")\n\n # Configure the connection as required\n\n logger.debug(\n \"Setting up connection with baud %s, write_term %s and read_term %s\",\n baud_rate,\n repr(write_termination),\n repr(read_termination),\n )\n\n instr.baud_rate = baud_rate\n if read_termination:\n instr.read_termination = read_termination\n if write_termination:\n instr.write_termination = write_termination\n if timeout:\n instr.timeout = timeout\n # instr.data_bits = 8\n # instr.stop_bits = pyvisa.constants.StopBits.one\n # instr.parity = pyvisa.constants.Parity.none\n # instr.flow_control = visa.constants.VI_ASRL_FLOW_NONE\n\n if wait_after_connect:\n time.sleep(wait_after_connect)\n\n logger.debug('Device \"{}\" init complete'.format(id))\n\n return instr",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n pin = config.get(CONF_PIN)\n\n add_devices([ProgtimeSwitch(mac, pin, name)])",
"def addExtraDevices(self):\n \n # These tables were extracted from\n # pirates/src/piratesgui/GameOptions.py.\n \n ati_device_list = [ \n [\"ATI MOBILITY/RADEON X700\", 0x5653],\n [1, \"Radeon X1950 XTX Uber - Limited Edition\", 0x7248],\n [1, \"Radeon X1950 XTX Uber - Limited Edition Secondary\", 0x7268],\n [1, \"Radeon X800 CrossFire Edition\", 0x554D],\n [1, \"Radeon X800 CrossFire Edition Secondary\", 0x556D],\n [1, \"Radeon X850 CrossFire Edition\", 0x5D52],\n [1, \"Radeon X850 CrossFire Edition Secondary\", 0x5D72],\n [\"Radeon X550/X700 Series\", 0x564F],\n [\"ATI FireGL T2\", 0x4154],\n [\"ATI FireGL T2 Secondary\", 0x4174],\n [\"ATI FireGL V3100\", 0x5B64],\n [\"ATI FireGL V3100 Secondary\", 0x5B74],\n [\"ATI FireGL V3200\", 0x3E54],\n [\"ATI FireGL V3200 Secondary\", 0x3E74],\n [\"ATI FireGL V3300\", 0x7152],\n [\"ATI FireGL V3300 Secondary\", 0x7172],\n [\"ATI FireGL V3350\", 0x7153],\n [\"ATI FireGL V3350 Secondary\", 0x7173],\n [\"ATI FireGL V3400\", 0x71D2],\n [\"ATI FireGL V3400 Secondary\", 0x71F2],\n [\"ATI FireGL V5000\", 0x5E48],\n [\"ATI FireGL V5000 Secondary\", 0x5E68],\n [\"ATI FireGL V5100\", 0x5551],\n [\"ATI FireGL V5100 Secondary\", 0x5571],\n [\"ATI FireGL V5200\", 0x71DA],\n [\"ATI FireGL V5200 Secondary\", 0x71FA],\n [\"ATI FireGL V5300\", 0x7105],\n [\"ATI FireGL V5300 Secondary\", 0x7125],\n [\"ATI FireGL V7100\", 0x5550],\n [\"ATI FireGL V7100 Secondary\", 0x5570],\n [\"ATI FireGL V7200\", 0x5D50],\n [\"ATI FireGL V7200 \", 0x7104],\n [\"ATI FireGL V7200 Secondary\", 0x5D70],\n [\"ATI FireGL V7200 Secondary \", 0x7124],\n [\"ATI FireGL V7300\", 0x710E],\n [\"ATI FireGL V7300 Secondary\", 0x712E],\n [\"ATI FireGL V7350\", 0x710F],\n [\"ATI FireGL V7350 Secondary\", 0x712F],\n [\"ATI FireGL X1\", 0x4E47],\n [\"ATI FireGL X1 Secondary\", 0x4E67],\n [\"ATI FireGL X2-256/X2-256t\", 0x4E4B],\n [\"ATI FireGL X2-256/X2-256t Secondary\", 0x4E6B],\n [\"ATI FireGL X3-256\", 0x4A4D],\n [\"ATI FireGL X3-256 Secondary\", 0x4A6D],\n [\"ATI FireGL Z1\", 0x4147],\n [\"ATI FireGL Z1 Secondary\", 0x4167],\n [\"ATI FireMV 2200\", 0x5B65],\n [\"ATI FireMV 2200 Secondary\", 0x5B75],\n [\"ATI FireMV 2250\", 0x719B],\n [\"ATI FireMV 2250 Secondary\", 0x71BB],\n [\"ATI FireMV 2400\", 0x3151],\n [\"ATI FireMV 2400 Secondary\", 0x3171],\n [\"ATI FireStream 2U\", 0x724E],\n [\"ATI FireStream 2U Secondary\", 0x726E],\n [\"ATI MOBILITY FIRE GL 7800\", 0x4C58],\n [\"ATI MOBILITY FIRE GL T2/T2e\", 0x4E54],\n [\"ATI MOBILITY FireGL V3100\", 0x5464],\n [\"ATI MOBILITY FireGL V3200\", 0x3154],\n [\"ATI MOBILITY FireGL V5000\", 0x564A],\n [\"ATI MOBILITY FireGL V5000 \", 0x564B],\n [\"ATI MOBILITY FireGL V5100\", 0x5D49],\n [\"ATI MOBILITY FireGL V5200\", 0x71C4],\n [\"ATI MOBILITY FireGL V5250\", 0x71D4],\n [\"ATI MOBILITY FireGL V7100\", 0x7106],\n [\"ATI MOBILITY FireGL V7200\", 0x7103],\n [\"ATI MOBILITY RADEON\", 0x4C59],\n [\"ATI MOBILITY RADEON 7500\", 0x4C57],\n [\"ATI MOBILITY RADEON 9500\", 0x4E52],\n [\"ATI MOBILITY RADEON 9550\", 0x4E56],\n [\"ATI MOBILITY RADEON 9600/9700 Series\", 0x4E50],\n [\"ATI MOBILITY RADEON 9800\", 0x4A4E],\n [\"ATI Mobility Radeon HD 2300\", 0x7210],\n [\"ATI Mobility Radeon HD 2300 \", 0x7211],\n [\"ATI Mobility Radeon HD 2400\", 0x94C9],\n [\"ATI Mobility Radeon HD 2400 XT\", 0x94C8],\n [1, \"ATI Mobility Radeon HD 2600\", 0x9581],\n [1, \"ATI Mobility Radeon HD 2600 XT\", 0x9583],\n [\"ATI Mobility Radeon X1300\", 0x714A],\n [\"ATI Mobility Radeon X1300 \", 0x7149],\n [\"ATI Mobility Radeon X1300 \", 0x714B],\n [\"ATI Mobility Radeon X1300 \", 
0x714C],\n [\"ATI Mobility Radeon X1350\", 0x718B],\n [\"ATI Mobility Radeon X1350 \", 0x718C],\n [\"ATI Mobility Radeon X1350 \", 0x7196],\n [\"ATI Mobility Radeon X1400\", 0x7145],\n [\"ATI Mobility Radeon X1450\", 0x7186],\n [\"ATI Mobility Radeon X1450 \", 0x718D],\n [\"ATI Mobility Radeon X1600\", 0x71C5],\n [\"ATI Mobility Radeon X1700\", 0x71D5],\n [\"ATI Mobility Radeon X1700 \", 0x71DE],\n [\"ATI Mobility Radeon X1700 XT\", 0x71D6],\n [1, \"ATI Mobility Radeon X1800\", 0x7102],\n [1, \"ATI Mobility Radeon X1800 XT\", 0x7101],\n [1, \"ATI Mobility Radeon X1900\", 0x7284],\n [1, \"ATI Mobility Radeon X2300\", 0x718A],\n [1, \"ATI Mobility Radeon X2300 \", 0x7188],\n [\"ATI MOBILITY RADEON X300\", 0x5461],\n [\"ATI MOBILITY RADEON X300 \", 0x5460],\n [\"ATI MOBILITY RADEON X300 \", 0x3152],\n [\"ATI MOBILITY RADEON X600\", 0x3150],\n [\"ATI MOBILITY RADEON X600 SE\", 0x5462],\n [\"ATI MOBILITY RADEON X700\", 0x5652],\n [\"ATI MOBILITY RADEON X700 \", 0x5653],\n [\"ATI MOBILITY RADEON X700 Secondary\", 0x5673],\n [1, \"ATI MOBILITY RADEON X800\", 0x5D4A],\n [1, \"ATI MOBILITY RADEON X800 XT\", 0x5D48],\n [\"ATI Radeon 9550/X1050 Series\", 0x4153],\n [\"ATI Radeon 9550/X1050 Series Secondary\", 0x4173],\n [\"ATI RADEON 9600 Series\", 0x4150],\n [\"ATI RADEON 9600 Series \", 0x4E51],\n [\"ATI RADEON 9600 Series \", 0x4151],\n [\"ATI RADEON 9600 Series \", 0x4155],\n [\"ATI RADEON 9600 Series \", 0x4152],\n [\"ATI RADEON 9600 Series Secondary\", 0x4E71],\n [\"ATI RADEON 9600 Series Secondary \", 0x4171],\n [\"ATI RADEON 9600 Series Secondary \", 0x4170],\n [\"ATI RADEON 9600 Series Secondary \", 0x4175],\n [\"ATI RADEON 9600 Series Secondary \", 0x4172],\n [1, \"ATI Radeon HD 2900 XT\", 0x9402],\n [1, \"ATI Radeon HD 2900 XT \", 0x9403],\n [1, \"ATI Radeon HD 2900 XT \", 0x9400],\n [1, \"ATI Radeon HD 2900 XT \", 0x9401],\n [\"ATI Radeon X1200 Series\", 0x791E],\n [\"ATI Radeon X1200 Series \", 0x791F],\n [1, \"ATI Radeon X1950 GT\", 0x7288],\n [1, \"ATI Radeon X1950 GT Secondary\", 0x72A8],\n [1, \"ATI RADEON X800 GT\", 0x554E],\n [1, \"ATI RADEON X800 GT Secondary\", 0x556E],\n [1, \"ATI RADEON X800 XL\", 0x554D],\n [1, \"ATI RADEON X800 XL Secondary\", 0x556D],\n [1, \"ATI RADEON X850 PRO\", 0x4B4B],\n [1, \"ATI RADEON X850 PRO Secondary\", 0x4B6B],\n [1, \"ATI RADEON X850 SE\", 0x4B4A],\n [1, \"ATI RADEON X850 SE Secondary\", 0x4B6A],\n [1, \"ATI RADEON X850 XT\", 0x4B49],\n [1, \"ATI RADEON X850 XT Platinum Edition\", 0x4B4C],\n [1, \"ATI RADEON X850 XT Platinum Edition Secondary\", 0x4B6C],\n [1, \"ATI RADEON X850 XT Secondary\", 0x4B69],\n [\"ATI Radeon Xpress 1200 Series\", 0x793F],\n [\"ATI Radeon Xpress 1200 Series \", 0x7941],\n [\"ATI Radeon Xpress 1200 Series \", 0x7942],\n [\"ATI Radeon Xpress Series\", 0x5A61],\n [\"ATI Radeon Xpress Series \", 0x5A63],\n [\"ATI Radeon Xpress Series \", 0x5A62],\n [\"ATI Radeon Xpress Series \", 0x5A41],\n [\"ATI Radeon Xpress Series \", 0x5A43],\n [\"ATI Radeon Xpress Series \", 0x5A42],\n [\"ATI Radeon Xpress Series \", 0x5954],\n [\"ATI Radeon Xpress Series \", 0x5854],\n [\"ATI Radeon Xpress Series \", 0x5955],\n [\"ATI Radeon Xpress Series \", 0x5974],\n [\"ATI Radeon Xpress Series \", 0x5874],\n [\"ATI Radeon Xpress Series \", 0x5975],\n [\"Radeon 9500\", 0x4144],\n [\"Radeon 9500 \", 0x4149],\n [\"Radeon 9500 PRO / 9700\", 0x4E45],\n [\"Radeon 9500 PRO / 9700 Secondary\", 0x4E65],\n [\"Radeon 9500 Secondary\", 0x4164],\n [\"Radeon 9500 Secondary \", 0x4169],\n [\"Radeon 9600 TX\", 0x4E46],\n [\"Radeon 9600 TX Secondary\", 0x4E66],\n 
[\"Radeon 9600TX\", 0x4146],\n [\"Radeon 9600TX Secondary\", 0x4166],\n [\"Radeon 9700 PRO\", 0x4E44],\n [\"Radeon 9700 PRO Secondary\", 0x4E64],\n [\"Radeon 9800\", 0x4E49],\n [\"Radeon 9800 PRO\", 0x4E48],\n [\"Radeon 9800 PRO Secondary\", 0x4E68],\n [\"Radeon 9800 SE\", 0x4148],\n [\"Radeon 9800 SE Secondary\", 0x4168],\n [\"Radeon 9800 Secondary\", 0x4E69],\n [\"Radeon 9800 XT\", 0x4E4A],\n [\"Radeon 9800 XT Secondary\", 0x4E6A],\n [\"Radeon X1300 / X1550 Series\", 0x7146],\n [\"Radeon X1300 / X1550 Series Secondary\", 0x7166],\n [\"Radeon X1300 Series\", 0x714E],\n [\"Radeon X1300 Series \", 0x715E],\n [\"Radeon X1300 Series \", 0x714D],\n [\"Radeon X1300 Series \", 0x71C3],\n [\"Radeon X1300 Series \", 0x718F],\n [\"Radeon X1300 Series Secondary\", 0x716E],\n [\"Radeon X1300 Series Secondary \", 0x717E],\n [\"Radeon X1300 Series Secondary \", 0x716D],\n [\"Radeon X1300 Series Secondary \", 0x71E3],\n [\"Radeon X1300 Series Secondary \", 0x71AF],\n [\"Radeon X1300/X1550 Series\", 0x7142],\n [\"Radeon X1300/X1550 Series \", 0x7180],\n [\"Radeon X1300/X1550 Series \", 0x7183],\n [\"Radeon X1300/X1550 Series \", 0x7187],\n [\"Radeon X1300/X1550 Series Secondary\", 0x7162],\n [\"Radeon X1300/X1550 Series Secondary \", 0x71A0],\n [\"Radeon X1300/X1550 Series Secondary \", 0x71A3],\n [\"Radeon X1300/X1550 Series Secondary \", 0x71A7],\n [\"Radeon X1550 64-bit\", 0x7147],\n [\"Radeon X1550 64-bit \", 0x715F],\n [\"Radeon X1550 64-bit \", 0x719F],\n [\"Radeon X1550 64-bit Secondary\", 0x7167],\n [\"Radeon X1550 64-bit Secondary \", 0x717F],\n [\"Radeon X1550 Series\", 0x7143],\n [\"Radeon X1550 Series \", 0x7193],\n [\"Radeon X1550 Series Secondary\", 0x7163],\n [\"Radeon X1550 Series Secondary \", 0x71B3],\n [\"Radeon X1600 Pro / Radeon X1300 XT\", 0x71CE],\n [\"Radeon X1600 Pro / Radeon X1300 XT Secondary\", 0x71EE],\n [\"Radeon X1600 Series\", 0x7140],\n [\"Radeon X1600 Series \", 0x71C0],\n [\"Radeon X1600 Series \", 0x71C2],\n [\"Radeon X1600 Series \", 0x71C6],\n [\"Radeon X1600 Series \", 0x7181],\n [\"Radeon X1600 Series \", 0x71CD],\n [\"Radeon X1600 Series Secondary\", 0x7160],\n [\"Radeon X1600 Series Secondary \", 0x71E2],\n [\"Radeon X1600 Series Secondary \", 0x71E6],\n [\"Radeon X1600 Series Secondary \", 0x71A1],\n [\"Radeon X1600 Series Secondary \", 0x71ED],\n [\"Radeon X1600 Series Secondary \", 0x71E0],\n [\"Radeon X1650 Series\", 0x71C1],\n [\"Radeon X1650 Series \", 0x7293],\n [\"Radeon X1650 Series \", 0x7291],\n [\"Radeon X1650 Series \", 0x71C7],\n [\"Radeon X1650 Series Secondary\", 0x71E1],\n [\"Radeon X1650 Series Secondary \", 0x72B3],\n [\"Radeon X1650 Series Secondary \", 0x72B1],\n [\"Radeon X1650 Series Secondary \", 0x71E7],\n [1, \"Radeon X1800 Series\", 0x7100],\n [1, \"Radeon X1800 Series \", 0x7108],\n [1, \"Radeon X1800 Series \", 0x7109],\n [1, \"Radeon X1800 Series \", 0x710A],\n [1, \"Radeon X1800 Series \", 0x710B],\n [1, \"Radeon X1800 Series \", 0x710C],\n [1, \"Radeon X1800 Series Secondary\", 0x7120],\n [1, \"Radeon X1800 Series Secondary \", 0x7128],\n [1, \"Radeon X1800 Series Secondary \", 0x7129],\n [1, \"Radeon X1800 Series Secondary \", 0x712A],\n [1, \"Radeon X1800 Series Secondary \", 0x712B],\n [1, \"Radeon X1800 Series Secondary \", 0x712C],\n [1, \"Radeon X1900 Series\", 0x7243],\n [1, \"Radeon X1900 Series \", 0x7245],\n [1, \"Radeon X1900 Series \", 0x7246],\n [1, \"Radeon X1900 Series \", 0x7247],\n [1, \"Radeon X1900 Series \", 0x7248],\n [1, \"Radeon X1900 Series \", 0x7249],\n [1, \"Radeon X1900 Series \", 0x724A],\n [1, \"Radeon 
X1900 Series \", 0x724B],\n [1, \"Radeon X1900 Series \", 0x724C],\n [1, \"Radeon X1900 Series \", 0x724D],\n [1, \"Radeon X1900 Series \", 0x724F],\n [1, \"Radeon X1900 Series Secondary\", 0x7263],\n [1, \"Radeon X1900 Series Secondary \", 0x7265],\n [1, \"Radeon X1900 Series Secondary \", 0x7266],\n [1, \"Radeon X1900 Series Secondary \", 0x7267],\n [1, \"Radeon X1900 Series Secondary \", 0x7268],\n [1, \"Radeon X1900 Series Secondary \", 0x7269],\n [1, \"Radeon X1900 Series Secondary \", 0x726A],\n [1, \"Radeon X1900 Series Secondary \", 0x726B],\n [1, \"Radeon X1900 Series Secondary \", 0x726C],\n [1, \"Radeon X1900 Series Secondary \", 0x726D],\n [1, \"Radeon X1900 Series Secondary \", 0x726F],\n [1, \"Radeon X1950 Series\", 0x7280],\n [1, \"Radeon X1950 Series \", 0x7240],\n [1, \"Radeon X1950 Series \", 0x7244],\n [1, \"Radeon X1950 Series Secondary\", 0x72A0],\n [1, \"Radeon X1950 Series Secondary \", 0x7260],\n [1, \"Radeon X1950 Series Secondary \", 0x7264],\n [\"Radeon X300/X550/X1050 Series\", 0x5B60],\n [\"Radeon X300/X550/X1050 Series \", 0x5B63],\n [\"Radeon X300/X550/X1050 Series Secondary\", 0x5B73],\n [\"Radeon X300/X550/X1050 Series Secondary \", 0x5B70],\n [\"Radeon X550/X700 Series \", 0x5657],\n [\"Radeon X550/X700 Series Secondary\", 0x5677],\n [\"Radeon X600 Series\", 0x5B62],\n [\"Radeon X600 Series Secondary\", 0x5B72],\n [\"Radeon X600/X550 Series\", 0x3E50],\n [\"Radeon X600/X550 Series Secondary\", 0x3E70],\n [\"Radeon X700\", 0x5E4D],\n [\"Radeon X700 PRO\", 0x5E4B],\n [\"Radeon X700 PRO Secondary\", 0x5E6B],\n [\"Radeon X700 SE\", 0x5E4C],\n [\"Radeon X700 SE Secondary\", 0x5E6C],\n [\"Radeon X700 Secondary\", 0x5E6D],\n [\"Radeon X700 XT\", 0x5E4A],\n [\"Radeon X700 XT Secondary\", 0x5E6A],\n [\"Radeon X700/X550 Series\", 0x5E4F],\n [\"Radeon X700/X550 Series Secondary\", 0x5E6F],\n [1, \"Radeon X800 GT\", 0x554B],\n [1, \"Radeon X800 GT Secondary\", 0x556B],\n [1, \"Radeon X800 GTO\", 0x5549],\n [1, \"Radeon X800 GTO \", 0x554F],\n [1, \"Radeon X800 GTO \", 0x5D4F],\n [1, \"Radeon X800 GTO Secondary\", 0x5569],\n [1, \"Radeon X800 GTO Secondary \", 0x556F],\n [1, \"Radeon X800 GTO Secondary \", 0x5D6F],\n [1, \"Radeon X800 PRO\", 0x4A49],\n [1, \"Radeon X800 PRO Secondary\", 0x4A69],\n [1, \"Radeon X800 SE\", 0x4A4F],\n [1, \"Radeon X800 SE Secondary\", 0x4A6F],\n [1, \"Radeon X800 Series\", 0x4A48],\n [1, \"Radeon X800 Series \", 0x4A4A],\n [1, \"Radeon X800 Series \", 0x4A4C],\n [1, \"Radeon X800 Series \", 0x5548],\n [1, \"Radeon X800 Series Secondary\", 0x4A68],\n [1, \"Radeon X800 Series Secondary \", 0x4A6A],\n [1, \"Radeon X800 Series Secondary \", 0x4A6C],\n [1, \"Radeon X800 Series Secondary \", 0x5568],\n [1, \"Radeon X800 VE\", 0x4A54],\n [1, \"Radeon X800 VE Secondary\", 0x4A74],\n [1, \"Radeon X800 XT\", 0x4A4B],\n [1, \"Radeon X800 XT \", 0x5D57],\n [1, \"Radeon X800 XT Platinum Edition\", 0x4A50],\n [1, \"Radeon X800 XT Platinum Edition \", 0x554A],\n [1, \"Radeon X800 XT Platinum Edition Secondary\", 0x4A70],\n [1, \"Radeon X800 XT Platinum Edition Secondary \", 0x556A],\n [1, \"Radeon X800 XT Secondary\", 0x4A6B],\n [1, \"Radeon X800 XT Secondary \", 0x5D77],\n [1, \"Radeon X850 XT\", 0x5D52],\n [1, \"Radeon X850 XT Platinum Edition\", 0x5D4D],\n [1, \"Radeon X850 XT Platinum Edition Secondary\", 0x5D6D],\n [1, \"Radeon X850 XT Secondary\", 0x5D72],\n ]\n vendorId = 0x1002\n for entry in ati_device_list:\n if len(entry) == 3:\n flag, deviceName, deviceId = entry\n else:\n deviceName, deviceId = entry\n self.devices[(vendorId, deviceId)] = 
deviceName.strip()\n \n nvidia_device_list = [\n [0x014F, \"GeForce 6200\"],\n [0x00F3, \"GeForce 6200\"],\n [0x0221, \"GeForce 6200\"],\n [0x0163, \"GeForce 6200 LE\"],\n [0x0162, \"GeForce 6200SE TurboCache(TM)\"],\n [0x0161, \"GeForce 6200 TurboCache(TM)\"],\n [0x0162, \"GeForce 6200SE TurboCache(TM)\"],\n [0x0160, \"GeForce 6500\"],\n [1, 0x0141, \"GeForce 6600\"],\n [1, 0x00F2, \"GeForce 6600\"],\n [1, 0x0140, \"GeForce 6600 GT\"],\n [1, 0x00F1, \"GeForce 6600 GT\"],\n [1, 0x0142, \"GeForce 6600 LE\"],\n [1, 0x00F4, \"GeForce 6600 LE\"],\n [1, 0x0143, \"GeForce 6600 VE\"],\n [1, 0x0147, \"GeForce 6700 XL\"],\n [1, 0x0041, \"GeForce 6800\"],\n [1, 0x00C1, \"GeForce 6800\"],\n [1, 0x0047, \"GeForce 6800 GS\"],\n [1, 0x00F6, \"GeForce 6800 GS\"],\n [1, 0x00C0, \"GeForce 6800 GS\"],\n [1, 0x0045, \"GeForce 6800 GT\"],\n [1, 0x00F9, \"GeForce 6800 Series GPU\"],\n [1, 0x00C2, \"GeForce 6800 LE\"],\n [1, 0x0040, \"GeForce 6800 Ultra\"],\n [1, 0x00F9, \"GeForce 6800 Series GPU\"],\n [1, 0x0043, \"GeForce 6800 XE\"],\n [1, 0x0048, \"GeForce 6800 XT\"],\n [1, 0x0218, \"GeForce 6800 XT\"],\n [1, 0x00C3, \"GeForce 6800 XT\"],\n [0x01DF, \"GeForce 7300 GS\"],\n [0x0393, \"GeForce 7300 GT\"],\n [0x01D1, \"GeForce 7300 LE\"],\n [0x01D3, \"GeForce 7300 SE\"],\n [0x01DD, \"GeForce 7500 LE\"],\n [1, 0x0392, \"GeForce 7600 GS\"],\n [1, 0x0392, \"GeForce 7600 GS\"],\n [1, 0x02E1, \"GeForce 7600 GS\"],\n [1, 0x0391, \"GeForce 7600 GT\"],\n [1, 0x0394, \"GeForce 7600 LE\"],\n [1, 0x00F5, \"GeForce 7800 GS\"],\n [1, 0x0092, \"GeForce 7800 GT\"],\n [1, 0x0091, \"GeForce 7800 GTX\"],\n [1, 0x0291, \"GeForce 7900 GT/GTO\"],\n [1, 0x0290, \"GeForce 7900 GTX\"],\n [1, 0x0293, \"GeForce 7900 GX2\"],\n [1, 0x0294, \"GeForce 7950 GX2\"],\n [0x0322, \"GeForce FX 5200\"],\n [0x0321, \"GeForce FX 5200 Ultra\"],\n [0x0323, \"GeForce FX 5200LE\"],\n [0x0326, \"GeForce FX 5500\"],\n [0x0326, \"GeForce FX 5500\"],\n [0x0312, \"GeForce FX 5600\"],\n [0x0311, \"GeForce FX 5600 Ultra\"],\n [0x0314, \"GeForce FX 5600XT\"],\n [0x0342, \"GeForce FX 5700\"],\n [0x0341, \"GeForce FX 5700 Ultra\"],\n [0x0343, \"GeForce FX 5700LE\"],\n [0x0344, \"GeForce FX 5700VE\"],\n [0x0302, \"GeForce FX 5800\"],\n [0x0301, \"GeForce FX 5800 Ultra\"],\n [0x0331, \"GeForce FX 5900\"],\n [0x0330, \"GeForce FX 5900 Ultra\"],\n [0x0333, \"GeForce FX 5950 Ultra\"],\n [0x0324, \"GeForce FX Go5200 64M\"],\n [0x031A, \"GeForce FX Go5600\"],\n [0x0347, \"GeForce FX Go5700\"],\n [0x0167, \"GeForce Go 6200/6400\"],\n [0x0168, \"GeForce Go 6200/6400\"],\n [1, 0x0148, \"GeForce Go 6600\"],\n [1, 0x00c8, \"GeForce Go 6800\"],\n [1, 0x00c9, \"GeForce Go 6800 Ultra\"],\n [1, 0x0098, \"GeForce Go 7800\"],\n [1, 0x0099, \"GeForce Go 7800 GTX\"],\n [1, 0x0298, \"GeForce Go 7900 GS\"],\n [1, 0x0299, \"GeForce Go 7900 GTX\"],\n [0x0185, \"GeForce MX 4000\"],\n [0x00FA, \"GeForce PCX 5750\"],\n [0x00FB, \"GeForce PCX 5900\"],\n [0x0110, \"GeForce2 MX/MX 400\"],\n [0x0111, \"GeForce2 MX200\"],\n [0x0110, \"GeForce2 MX/MX 400\"],\n [0x0200, \"GeForce3\"],\n [0x0201, \"GeForce3 Ti200\"],\n [0x0202, \"GeForce3 Ti500\"],\n [0x0172, \"GeForce4 MX 420\"],\n [0x0171, \"GeForce4 MX 440\"],\n [0x0181, \"GeForce4 MX 440 with AGP8X\"],\n [0x0173, \"GeForce4 MX 440-SE\"],\n [0x0170, \"GeForce4 MX 460\"],\n [0x0253, \"GeForce4 Ti 4200\"],\n [0x0281, \"GeForce4 Ti 4200 with AGP8X\"],\n [0x0251, \"GeForce4 Ti 4400\"],\n [0x0250, \"GeForce4 Ti 4600\"],\n [0x0280, \"GeForce4 Ti 4800\"],\n [0x0282, \"GeForce4 Ti 4800SE\"],\n [0x0203, \"Quadro DCC\"],\n [0x0309, \"Quadro FX 1000\"],\n 
[0x034E, \"Quadro FX 1100\"],\n [0x00FE, \"Quadro FX 1300\"],\n [0x00CE, \"Quadro FX 1400\"],\n [0x0308, \"Quadro FX 2000\"],\n [0x0338, \"Quadro FX 3000\"],\n [0x00FD, \"Quadro PCI-E Series\"],\n [1, 0x00F8, \"Quadro FX 3400/4400\"],\n [1, 0x00CD, \"Quadro FX 3450/4000 SDI\"],\n [1, 0x004E, \"Quadro FX 4000\"],\n [1, 0x00CD, \"Quadro FX 3450/4000 SDI\"],\n [1, 0x00F8, \"Quadro FX 3400/4400\"],\n [1, 0x009D, \"Quadro FX 4500\"],\n [1, 0x029F, \"Quadro FX 4500 X2\"],\n [0x032B, \"Quadro FX 500/FX 600\"],\n [0x014E, \"Quadro FX 540\"],\n [0x014C, \"Quadro FX 540 MXM\"],\n [0x032B, \"Quadro FX 500/FX 600\"],\n [0X033F, \"Quadro FX 700\"],\n [0x034C, \"Quadro FX Go1000\"],\n [0x00CC, \"Quadro FX Go1400\"],\n [0x031C, \"Quadro FX Go700\"],\n [0x018A, \"Quadro NVS with AGP8X\"],\n [0x032A, \"Quadro NVS 280 PCI\"],\n [0x00FD, \"Quadro PCI-E Series\"],\n [0x0165, \"Quadro NVS 285\"],\n [0x017A, \"Quadro NVS\"],\n [0x018A, \"Quadro NVS with AGP8X\"],\n [0x0113, \"Quadro2 MXR/EX\"],\n [0x017A, \"Quadro NVS\"],\n [0x018B, \"Quadro4 380 XGL\"],\n [0x0178, \"Quadro4 550 XGL\"],\n [0x0188, \"Quadro4 580 XGL\"],\n [0x025B, \"Quadro4 700 XGL\"],\n [0x0259, \"Quadro4 750 XGL\"],\n [0x0258, \"Quadro4 900 XGL\"],\n [0x0288, \"Quadro4 980 XGL\"],\n [0x028C, \"Quadro4 Go700\"],\n [1, 0x0295, \"NVIDIA GeForce 7950 GT\"],\n [0x03D0, \"NVIDIA GeForce 6100 nForce 430\"],\n [0x03D1, \"NVIDIA GeForce 6100 nForce 405\"],\n [0x03D2, \"NVIDIA GeForce 6100 nForce 400\"],\n [0x0241, \"NVIDIA GeForce 6150 LE\"],\n [0x0242, \"NVIDIA GeForce 6100\"],\n [0x0245, \"NVIDIA Quadro NVS 210S / NVIDIA GeForce 6150LE\"],\n [1, 0x029C, \"NVIDIA Quadro FX 5500\"],\n [1, 0x0191, \"NVIDIA GeForce 8800 GTX\"],\n [1, 0x0193, \"NVIDIA GeForce 8800 GTS\"],\n [1, 0x0400, \"NVIDIA GeForce 8600 GTS\"],\n [1, 0x0402, \"NVIDIA GeForce 8600 GT\"],\n [0x0421, \"NVIDIA GeForce 8500 GT\"],\n [0x0422, \"NVIDIA GeForce 8400 GS\"],\n [0x0423, \"NVIDIA GeForce 8300 GS\"],\n ]\n vendorId = 0x10de\n for entry in nvidia_device_list:\n if len(entry) == 3:\n flag, deviceId, deviceName = entry\n else:\n deviceId, deviceName = entry\n self.devices[(vendorId, deviceId)] = deviceName.strip()",
"def continue_setup_platform(hass, config, token, add_devices, discovery_info=None):\n if \"trakt\" in _CONFIGURING:\n hass.components.configurator.request_done(_CONFIGURING.pop(\"trakt\"))\n \n add_devices([TraktMyShowCalendarSensor(hass, config, token)], True)",
"def test_create_tang_1(self):\n command_line = (\n self._MENU\n + [self._POOLNAME]\n + self._DEVICES\n + [\"--clevis=tang\", \"--trust-url\", \"--tang-url=http\"]\n )\n TEST_RUNNER(command_line)",
"async def async_discover_sensor(tasmota_entity, discovery_hash):\n async_add_entities(\n [\n TasmotaSensor(\n tasmota_entity=tasmota_entity, discovery_hash=discovery_hash\n )\n ]\n )",
"def Add_Cisco_Device(device_type, host, username, password):\n cisco_device = main(device_type, host, username, password)\n cisco_list.append(cisco_device)",
"def test_create_tang_2(self):\n command_line = (\n self._MENU\n + [self._POOLNAME]\n + self._DEVICES\n + [\"--clevis=tang\", \"--thumbprint=print\", \"--tang-url=http\"]\n )\n TEST_RUNNER(command_line)",
"def setupMonti():\n #Update /etc/hosts with mongo-server and management-engine nodes\n sudo(\"apt-get install zookeeper\")\n sudo(\"apt-get install zookeeperd\")\n sudo(\"pip2 install chariot-runtime\")\n #update configuration file located in /etc/chariot/chariot.conf\n run (\"cd /etc/init.d && sudo update-rc.d chariot-nmw defaults 99\")\n sudo(\"reboot\")",
"def test_create_device(self):\n pass",
"def test_create_device(self):\n pass",
"def test_gwservice_createdevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n print(json.dumps(payload))\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device delete\", body=body)\n if resp.status_code != 200:\n assert False",
"def setup_platform(\n hass: HomeAssistant,\n config: Dict,\n add_devices: Callable,\n discovery_info: Optional[Dict] = None,\n) -> None:\n havdalah = config[HAVDALAH_MINUTES]\n candle_light = config[CANDLE_LIGHT_MINUTES]\n cities = config[GEONAMES]\n cities_list = cities.split(\",\")\n\n add_devices(\n [\n ShabbatTimes(\n hass,\n city,\n \"Shabbat Times {}\".format(city.replace(\"-\", \"_\")),\n havdalah,\n candle_light,\n )\n for city in cities_list\n ]\n )",
"def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))",
"def setup_platform(hass, config: ConfigType,\n add_devices: Callable[[list], None], discovery_info=[]):\n elk = hass.data['PyElk']['connection']\n elk_config = hass.data['PyElk']['config']\n discovered_devices = hass.data['PyElk']['discovered_devices']\n if elk is None:\n _LOGGER.error('Elk is None')\n return False\n if not elk.connected:\n _LOGGER.error('A connection has not been made to the Elk panel.')\n return False\n devices = []\n from PyElk.Thermostat import Thermostat as ElkThermostat\n # If no discovery info was passed in, discover automatically\n if len(discovery_info) == 0:\n # Gather areas\n for node in elk.THERMOSTATS:\n if node:\n if node.included is True and node.enabled is True:\n discovery_info.append(node)\n # If discovery info was passed in, check if we want to include it\n else:\n for node in discovery_info:\n if node.included is True and node.enabled is True:\n continue\n else:\n discovery_info.remove(node)\n # Add discovered devices\n for node in discovery_info:\n if isinstance(node, ElkThermostat):\n node_name = 'climate.' + 'elk_thermostat_' + format(node.number, '02')\n else:\n continue\n if node_name not in discovered_devices:\n _LOGGER.debug('Loading Elk %s: %s', node.classname, node.description_pretty())\n device = ElkClimateDevice(node)\n discovered_devices[node_name] = device\n devices.append(device)\n else:\n _LOGGER.debug('Skipping already loaded Elk %s: %s', node.classname, node.description_pretty())\n\n add_devices(devices, True)\n return True",
"def main():\n configs = [\"show configuration sessions\"]\n with EOSDriver(**MY_DEVICE) as conn:\n conn.register_configuration_session(session_name=\"my-config-session\")\n # for configuration sessions we have to first \"register\" the session with scrapli:\n result = conn.send_configs(configs=configs, privilege_level=\"my-config-session\")\n\n # we should see our session name with an \"*\" indicating that is the active config session\n print(result[0].result)"
]
| [
"0.60325503",
"0.5852927",
"0.5784722",
"0.5700005",
"0.5697959",
"0.56872237",
"0.56154716",
"0.55423445",
"0.5498065",
"0.5475614",
"0.54548955",
"0.54334223",
"0.5419632",
"0.541176",
"0.5364658",
"0.53400034",
"0.5306633",
"0.53012794",
"0.528794",
"0.5286875",
"0.52805877",
"0.52804947",
"0.5272169",
"0.5263267",
"0.5263267",
"0.5252011",
"0.52500504",
"0.5249773",
"0.52299273",
"0.52131855"
]
| 0.5898973 | 1 |
Returns average color distance between pixels in originalPixels list and newPixels list | def checkQualityOfSection(originalPixels, newPixels):
totalDistance = 0
numPixels = 0
for r in range(originalPixels.shape[0]):
for c in range(originalPixels.shape[1]):
if r == 0 or r == originalPixels.shape[0] - 1 or c == 0 or c == originalPixels.shape[1] - 1:
continue
totalDistance += colorDistance(originalPixels[r, c], newPixels[r, c])
numPixels += 1
return totalDistance/numPixels | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance(rgb1, rgb2):\n diffs = np.array(rgb1) - np.array(rgb2)\n return math.sqrt(np.sum(diffs**2))",
"def color_distance(RGB1, RGB2):\n d2_r = (RGB1[0] - RGB2[0]) ** 2\n d2_g = (RGB1[1] - RGB2[1]) ** 2\n d2_b = (RGB1[2] - RGB2[2]) ** 2\n return d2_r + d2_g + d2_b",
"def mold_image(self, images, mean_pixel):\n return images.astype(np.float32) - np.array(mean_pixel)\n pass",
"def colorDistance(color1, color2):\n intColor1 = np.array(color1, dtype=int)\n intColor2 = np.array(color2, dtype=int)\n return sqrt(2 * (intColor1[0] - intColor2[0])**2 + 4 * (intColor1[1] - intColor2[1])**2 + 3 * (intColor1[2] - intColor2[2])**2)",
"def color_dist(c1, c2):\n return sum((a - b) ** 2 for a, b in zip(to_ycc(c1), to_ycc(c2)))",
"def image_distance(self, image_a, image_b):\n return np.average(np.abs(image_a - image_b))",
"def calc_distance(color_a,color_b):\r\n distance = (color_a[0]-color_b[0])**2+(color_a[1]-color_b[1])**2+(color_a[2]-color_b[2])**2\r\n distance = distance**0.5\r\n return distance",
"def calc_color_distance(rgb1, rgb2):\n\n color1_rgb = sRGBColor(rgb1[0], rgb1[1], rgb1[2])\n color2_rgb = sRGBColor(rgb2[0], rgb2[1], rgb2[2])\n\n color1_lab = convert_color(color1_rgb, LabColor)\n color2_lab = convert_color(color2_rgb, LabColor)\n\n delta_e = delta_e_cie2000(color1_lab, color2_lab)\n return delta_e",
"def _color_distance(first_color: list, second_color: list) -> float:\n first_color_hls = colorsys.rgb_to_hls(*_normalize_color(first_color))\n second_color_hls = colorsys.rgb_to_hls(*_normalize_color(second_color))\n hue_distance = min(\n abs(first_color_hls[0] - second_color_hls[0]),\n 1 - abs(first_color_hls[0] - second_color_hls[0]),\n )\n return hue_distance",
"def distance(rgb1: Tuple[int, int, int], rgb2: Tuple[int, int, int]) -> float:\n r = rgb1[0] - rgb2[0]\n g = rgb1[1] - rgb2[1]\n b = rgb1[2] - rgb2[2]\n return math.sqrt(r**2 + g**2 + b**2)",
"def merge_colours(pixels, max_distance=5):\n pixels = copy(pixels)\n\n distances = [(item,\n np.sqrt(np.sum((np.array(item[0])-np.array(item[1]))**2))) for item in product(pixels.keys(),\n pixels.keys())]\n\n for pair, distance in distances:\n if pair[0] == pair[1]:\n continue # comparing the same colours\n if distance < max_distance:\n try:\n if pixels[pair[0]] > pixels[pair[1]]:\n pixels[pair[0]] += pixels[pair[1]]\n del pixels[pair[1]]\n else:\n pixels[pair[1]] += pixels[pair[0]]\n del pixels[pair[0]]\n except KeyError:\n continue # one of them is already gone\n\n return pixels",
"def color_distance(color1, color2):\n dist_h = color1[0] - color2[0]\n dist_s = color1[1] - color2[1]\n dist_v = color1[2] - color2[2]\n\n return sqrt(dist_h * dist_h + dist_s * dist_s + dist_v * dist_v)",
"def averageColorInRegion(self,x1,y1,x2,y2,skip_factor):\n \n\n rgb = [0, 0, 0, 0]\n temp = [0, 0, 0, 0]\n pixels = abs(((x2-x1) / skip_factor) * ((y2-y1) / skip_factor))\n\n #switching endpoints so iteration is positive\n if (x1 > x2):\n temp = x2\n x2 = x1\n x1 = temp\n\n if (y1 > y2):\n temp = y2\n y2 = y1\n y1 = temp\n\n for i in range(x1, x2, skip_factor):\n for j in range(y1, y2, skip_factor):\n temp = self.pixel(i, j)\n \n #rgb[0] += temp[0] * temp[3]/255 #Sum plus alpha correction\n #rgb[1] += temp[1] * temp[3]/255\n #rgb[2] += temp[2] * temp[3]/255\n #rgb[3] += temp[3]\n\n rgb[0] += temp[0] \n rgb[1] += temp[1]\n rgb[2] += temp[2] \n\n for i in range(4):\n rgb[i] = int(rgb[i] / pixels * brightness)\n #rgb[i] = int( (rgb[i] / pixels * brightness) * alpha)\n if (rgb[i] > 255):\n #cutting off at 255 - need to find the problem later\n rgb[i] = 255\n\n #if (rgb[i] < 20):\n # rgb[i] = 0\n \n\n return rgb",
"def total_histogram_diff(pixel_diff):\n return sum(i * n for i, n in enumerate(pixel_diff.histogram()))",
"def pixel_distance(stars):\n distances = np.zeros((stars.shape[0], stars.shape[0]))\n logger.info('Start')\n for i in range(stars.shape[0]):\n if i < stars.shape[0] - 1:\n dis1 = stars[i, 0] - stars[i + 1:, 0]\n dis2 = stars[i, 1] - stars[i + 1:, 1]\n dis = np.sqrt(dis1 ** 2 + dis2 ** 2)\n distances[i][i + 1:] = dis\n logger.info('The End!')\n return distances",
"def merge_mean_color(graph, src ,dst):\n\n graph.nodes[dst]['total color'] += graph.nodes[src]['total color']\n graph.nodes[dst]['pixel count'] += graph.nodes[src]['pixel count']\n graph.nodes[dst]['mean color'] = (graph.nodes[dst]['total color'] / graph.nodes[dst]['pixel count'])",
"def average_distance(c1, c2):\n return sum(sum(symmetric_distances[p1][p2] for p1 in c1) for p2 in c2) \\\n / (len(c1) * len(c2))",
"def meanrgb(color1,color2):\r\n if check_colormath:\r\n srgb1 = sRGBColor(color1[0],color1[1],color1[2])\r\n srgb2 = sRGBColor(color2[0],color2[1],color2[2])\r\n\r\n lab1 = convert_color (srgb1,LabColor)\r\n lab2 = convert_color (srgb2,LabColor)\r\n lab1tuple = SpectralColor.get_value_tuple(lab1)\r\n lab2tuple = SpectralColor.get_value_tuple(lab2)\r\n labAtuple = ( (lab1tuple[0] + lab2tuple[0])/2.0 , (lab1tuple[1] + lab2tuple[1])/2.0,\r\n (lab1tuple[2] + lab2tuple[2])/2.0 )\r\n labA = LabColor(labAtuple[0],labAtuple[1],labAtuple[2])\r\n rgbA = convert_color(labA,sRGBColor)\r\n rgbAtuple = SpectralColor.get_value_tuple(rgbA)\r\n return list(rgbAtuple)\r\n else:\r\n acolor = [0,0,0]\r\n for j in range(3):\r\n ## this seems to give a useful average color\r\n meancolor = (color1[j] + color2[j])/2.0\r\n # now lighten it a bit\r\n acolor[j] = (1.0 - (0.8 * (1.0 -meancolor )))\r\n return acolor",
"def colorDistance(self, color = (0, 0, 0)):\n return spsd.euclidean(np.array(color), np.array(self.meanColor()))",
"def dist_colorweight(ele1, ele2):\n \n dist_colorweight_v = ele1[2]*ele2[2]*dist_euclidean(ele1[0:2], ele2[0:2])\n return dist_colorweight_v",
"def _build_list_of_changed_pixels(self, diff, image_width, image_height, min_width, min_height, exclude_zones):\n\n # complete diff \"image\" to the size of step image\n diff = numpy.pad(diff, ((0, max(0, image_height - min_height)), (0, max(0, image_width - min_width))), constant_values=1)\n\n # ignore excluded pixels\n diff *= self._build_list_of_excluded_pixels2(exclude_zones, image_width, image_height)\n \n # draw mask of differences\n mask = numpy.ones((image_height, image_width, 1), dtype=uint8)\n diff_image = numpy.zeros((image_height, image_width, 4), dtype=uint8)\n cnd = diff[:,:] > 0 # says which pixels are non-zeros\n diff_image[cnd] = mask[cnd]\n diff_image *= numpy.array([0, 0, 255, 255], dtype=uint8) # print red pixels\n\n diff_pixels = numpy.transpose(diff.nonzero());\n \n return diff_pixels, diff_image",
"def patch_average_error(self, image_1, image_2, height, width, center_x, center_y):\n size = tf.constant([height, width], dtype=tf.int32)\n offset = tf.constant([[center_x, center_y]], dtype=tf.float32)\n image_1 = tf.constant(image_1, dtype=tf.float32)\n image_2 = tf.constant(image_2, dtype=tf.float32)\n #print(image_1.get_shape().as_list(), image_2.get_shape().as_list())\n patch_1 = tf.image.extract_glimpse(image_1, size, offset, centered=False, normalized=True)\n patch_2 = tf.image.extract_glimpse(image_2, size, offset, centered=False, normalized=True)\n\n shape_1 = patch_1.get_shape().as_list()\n shape_2 = patch_2.get_shape().as_list()\n assert shape_1 == shape_2, (\n 'Patch to compare must have the same shape'\n )\n patch_1 = tf.squeeze(patch_1)\n patch_2 = tf.squeeze(patch_2)\n mean_pixel_error = tf.reduce_mean(tf.sqrt(tf.square(patch_1-patch_2)))\n\n return mean_pixel_error, patch_1, patch_2",
"def color_averages(img):\n return np.average(img, axis = (0, 1))",
"def mold_image(images, config):\n return images.astype(np.float32) - config.MEAN_PIXEL",
"def mold_image(images, config):\n return images.astype(np.float32) - config.MEAN_PIXEL",
"def all_distances(self):\n points = self.color_lookup_table_points\n\n red = np.repeat(np.expand_dims(points[0], axis=0), points[0].size, axis=0)\n green = np.repeat(np.expand_dims(points[1], axis=0), points[1].size, axis=0)\n blue = np.repeat(np.expand_dims(points[2], axis=0), points[2].size, axis=0)\n\n self.distances = np.sqrt(\n np.square(red - red.transpose())\n + np.square(green - green.transpose())\n + np.square(blue - blue.transpose()))",
"def compare_images(im1, im2):\n errors = (im1 - im2) / 255\n return np.mean(np.square(errors))",
"def similarity_two_images_color(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:\n hist_image_1 = histogram_of_image_color(img1, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n hist_image_2 = histogram_of_image_color(img2, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n max_difference = max(2 * np.sum(hist_image_1), 2 * np.sum(hist_image_2))\n return 100 - 100 * np.sum(np.absolute(hist_image_1 - hist_image_2)) / max_difference",
"def average(rgb1: Tuple[int, int, int], rgb2: Tuple[int, int, int]) \\\n -> Tuple[int, int, int]:\n r = int((rgb1[0] + rgb2[0] + 0.5) / 2)\n g = int((rgb1[1] + rgb2[1] + 0.5) / 2)\n b = int((rgb1[2] + rgb2[2] + 0.5) / 2)\n return r, g, b",
"def image_diff(image_a, image_b):\n histogram_diff = total_histogram_diff(pixel_diff(image_a, image_b))\n\n return histogram_diff"
]
| [
"0.6561033",
"0.6329309",
"0.62736565",
"0.62726533",
"0.62004334",
"0.61687744",
"0.6100622",
"0.60588473",
"0.5993518",
"0.59609854",
"0.59419906",
"0.593154",
"0.5862414",
"0.57588434",
"0.5679564",
"0.5677268",
"0.56682885",
"0.5631702",
"0.5587156",
"0.55482876",
"0.55018073",
"0.5497573",
"0.5483053",
"0.5446544",
"0.5446544",
"0.5446227",
"0.5441422",
"0.54334635",
"0.5428346",
"0.54085284"
]
| 0.6925194 | 0 |
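A minimal sketch of the idea in the checkQualityOfSection record above, assuming a plain Euclidean RGB distance over equal-shaped HxWx3 uint8 arrays (the record's own colorDistance helper, like several of the negatives, may use a weighted formula instead); the function and variable names below are illustrative, not taken from the dataset.

import numpy as np

def mean_color_distance(original, new):
    # Work in signed integers so uint8 differences do not wrap around.
    diff = original.astype(np.int64) - new.astype(np.int64)
    per_pixel = np.sqrt((diff ** 2).sum(axis=-1))  # Euclidean distance per pixel
    interior = per_pixel[1:-1, 1:-1]               # skip the one-pixel border, as in the record
    return float(interior.mean())

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    a = rng.integers(0, 256, size=(8, 8, 3), dtype=np.uint8)
    b = rng.integers(0, 256, size=(8, 8, 3), dtype=np.uint8)
    print(mean_color_distance(a, b))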
Get the price of this membership as a string of pounds and pence. | def price_pounds(self):
price = '{0:03d}'.format(self.price)
return price[:-2] + '.' + price[-2:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_price(self):\n return self.price",
"def get_price(self):\n return self.price",
"def get_price(self):\n return self.price",
"def price(self):\n return self.price_",
"def display_price(self):\n return '$ '+str(self.price)",
"def get_price(self):\n return self._price",
"def get_price(self):\r\n return self.price",
"def get_price(self):\n return f'{self.soup.find(attrs={\"class\": \"woocommerce-Price-amount\"}).text}'",
"def price(self):\n return self._price",
"def price(self):\n return self._price",
"def getPrice(self):\n return self.price",
"def get_price(self):\n if self.price is None:\n price = self.data['pizza'][self.item_type][self.size]\n for topping in self.toppings:\n price += self.data['topping'][topping]\n return price\n return self.price",
"def price(self) -> float:\n return self._price",
"def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result",
"def BuyingPrice(self):\n return self.buying_rice",
"def get_product_price(self):\n\n price = \"0.0000\"\n\n try:\n price = self.trees.get_element_by_id(\"priceblock_ourprice\").text\n except:\n try:\n price = self.trees.get_element_by_id(\n \"price_inside_buybox\").text\n except:\n try:\n price = self.trees.get_element_by_id(\n \"priceblock_dealprice\").text\n except:\n try:\n price = self.trees.xpath(\n \"//span[@class='a-color-price']/text()\")[0]\n except:\n try:\n price = self.trees.xpath(\n \"//span[@class='a-size-base a-color-price']/text()\")[0]\n except:\n pass\n\n non_decimal = re.compile(r'[^\\d.]+')\n price = non_decimal.sub('', price)\n\n return round(float(price[0:5]), 2)",
"def pound():\r\n price = give_price_websites_1(\"https://www.tgju.org/profile/price_gbp\")\r\n\r\n if users_language[update.effective_chat.id] == \"english\":\r\n return \"pound : \" + format(price/10000, '.2f') + ' kTomans'\r\n elif users_language[update.effective_chat.id] == \"persian\":\r\n return \" هزارتومان\" + format(price/10000000, '.3f') + \"پوند : \"",
"def get_price(self):\n return self.sale_price if self.sale_price else self.price",
"def price(self):\n return self._safe_value(VAR_PRICE, float)",
"def getPrice(self):\n priceElem = self.driver.find_element_by_xpath(self.priceXPath)\n price = priceElem.text.replace(\"€\", \"\").replace(\" \", \"\").replace(\",\", \".\")\n return float(price)",
"def __get_deal_price(self):\n return self.create_random_decimal(min=1, max=100000)",
"def get_formated_price(\n amount: Decimal,\n precision: int = DEFAULT_DECIMAL_PLACES\n) -> str:\n return \"{:0.0{}f}\".format(amount, precision)",
"def format_price(self, price):\n precision = self._price_limits[3] or 8\n tick_size = self._price_limits[2] or 0.00000001\n\n adjusted_price = truncate(round(price / tick_size) * tick_size, precision)\n formatted_price = \"{:0.0{}f}\".format(adjusted_price, precision)\n\n # remove tailing 0s and dot\n if '.' in formatted_price:\n formatted_price = formatted_price.rstrip('0').rstrip('.')\n\n return formatted_price",
"def total_price(self):\n return self.owner.total_price()",
"def desired_price(self):\n return self._desired_price",
"def SellingPrice(self):\n return self.selling_price",
"def pricing(self):\n if self._pricing is None:\n self._pricing = Pricing(self)\n return self._pricing",
"def get_total(self):\r\n \r\n return str(round(self._total, 2))",
"def get_price_dollar(self):\n price = self.get_price()\n currency_code = self.get_currency_code()\n if price and currency_code:\n if currency_code != 'USD':\n return currency(price, from_money=currency_code, to_money='USD')\n return price",
"def base_price(self):\n return self._base_price"
]
| [
"0.7219085",
"0.7219085",
"0.7219085",
"0.7204589",
"0.7165673",
"0.7139111",
"0.7138113",
"0.70815927",
"0.7050938",
"0.7050938",
"0.69639355",
"0.6915576",
"0.68036145",
"0.66906595",
"0.6688339",
"0.66788113",
"0.65983194",
"0.6585856",
"0.6581671",
"0.6524062",
"0.6460041",
"0.6433464",
"0.6396544",
"0.6378623",
"0.63596153",
"0.63559407",
"0.6333303",
"0.6321773",
"0.6314707",
"0.63099825"
]
| 0.7743524 | 0 |
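A small, self-contained sketch of the pence-to-pounds formatting used in the price_pounds record above: zero-pad the integer pence value to at least three digits, then split off the last two digits as pence. The function name is illustrative and the price is assumed to be a non-negative integer number of pence.

def pence_to_pounds(price_in_pence: int) -> str:
    padded = '{0:03d}'.format(price_in_pence)   # 5 -> "005", 1250 -> "1250"
    return padded[:-2] + '.' + padded[-2:]      # insert the decimal point before the last two digits

if __name__ == "__main__":
    assert pence_to_pounds(5) == "0.05"
    assert pence_to_pounds(99) == "0.99"
    assert pence_to_pounds(1250) == "12.50"
    print(pence_to_pounds(1250))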
Reports API calls in highest to lowest usage order. | def stats():
# Log all API requests
exception = log_api()
if exception:
return jsonify({'error': exception}), HTTPStatus.INTERNAL_SERVER_ERROR
try:
data = redis.zrevrangebyscore(
REDIS_LOG_KEY_NAME,
REDIS_INT64_MAX,
0,
withscores=True)
# The redis response is an ordered list of lists:
# [["/stats",13.0],["/api/1/2/3/4/5",6.0]]
# and I prefer an ordered list of dictionaries so the
# caller doesn't need to guess which is which.
# Ordering is by descending request count
response_data = []
for row in data:
response_data.append({'count': row[1], 'url': row[0]})
return jsonify(response_data), HTTPStatus.OK
except RedisError as exc:
return jsonify({'error': exc}), HTTPStatus.INTERNAL_SERVER_ERROR | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def api_call_statistics(self):\n try:\n days = int(request.args.get('days'))\n except TypeError:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"days\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"days\", 400\n api_call_statistics = self.statistic_database.compute_statistics(days)\n last_days_uploaded_videos = {k.isoformat(): v for k, v in api_call_statistics.last_days_uploaded_videos.items()}\n last_days_user_registrations = {k.isoformat(): v for k, v in api_call_statistics.last_days_user_registrations.items()}\n last_days_users_logins = {k.isoformat(): v for k, v in api_call_statistics.last_days_users_logins.items()}\n last_days_api_call_amount = {k.isoformat(): v for k, v in api_call_statistics.last_days_api_call_amount.items()}\n last_day_mean_api_call_time = {k.isoformat(): v for k, v in api_call_statistics.last_day_mean_api_call_time.items()}\n return json.dumps({\"last_days_uploaded_videos\": last_days_uploaded_videos,\n \"last_days_user_registrations\": last_days_user_registrations,\n \"last_days_users_logins\": last_days_users_logins,\n \"last_days_api_call_amount\": last_days_api_call_amount,\n \"last_day_mean_api_call_time\": last_day_mean_api_call_time,\n \"last_days_api_calls_by_path\": api_call_statistics.last_days_api_calls_by_path,\n \"last_days_api_calls_by_status\": api_call_statistics.last_days_api_calls_by_status,\n \"last_days_api_calls_response_times_sample\": api_call_statistics.last_days_api_calls_response_times_sample,\n \"last_days_api_calls_by_method\": api_call_statistics.last_days_api_calls_by_method\n })",
"def api_call_tracker():\n calls = 0\n\n # try part: checks to see if the file exists. If it does, it'll read from it and set the calls variable to whatever\n # number is read from the file.\n # except part: if file is not found, it'll create the file and writes a 1 to it. This only should happen when the\n # file is initially created when the first API call is made. That's why it's writing 1 to the file.\n # the Except should be called only on the very first API call. The Try should be called for every API recording\n # hereafter.\n try:\n file = open(\"api_tracker.txt\", \"r\")\n for item in file:\n calls = int(item)\n file.close()\n\n except FileNotFoundError:\n file = open(\"api_tracker.txt\", \"w\")\n file.write(\"1\")\n file.close()\n print(\"API Calls: 1\")\n return None\n\n calls += 1\n calls = str(calls)\n\n file = open(\"api_tracker.txt\", \"w\")\n file.write(calls)\n print(f'API Calls: {calls}')\n file.close()",
"def get_api_calls(api_functions=api_functions, ignore_unfound=False):\n functions = [(n,f) for (n,f) in api_functions if getattr(f, \"is_api\", False)]\n functions = sorted(functions, key=lambda (n,f): n)\n ret = []\n for function in functions:\n try:\n ret.append(APICall(function))\n except NoReverseMatch:\n if not ignore_unfound:\n raise\n return ret",
"def callstats(): # real signature unknown; restored from __doc__\n return ()",
"def check_api(self, api, cost, details):\n current = self.data.get_metric(api)\n max = self.limits.lookup_api(api)\n\n if max is None:\n return\n\n if current > max:\n details['current_metric'] = current\n details['max_metric'] = max\n self.error(api = api, details = details)\n\n self.data.add_metric_cost(api, cost)",
"def api_call_counter(self, api_call_counter):\n\n self._api_call_counter = api_call_counter",
"def api_callcounter():\n try:\n return jsonify({'callcounter': get_model().call_counter})\n except Exception as e:\n response = jsonify({'error': 'API error'})\n response.status_code = 400\n return response",
"def __call__(self):\n for resource in self.resources:\n self._evaluate_resource(resource)\n self.perfdata = sorted([p for p in self.perfdata if p])",
"def api_used(self, api_used):\n\n self._api_used = api_used",
"def get_etherscan_calls() -> int:\n return _get_counter(\"etherscan_calls\")",
"def feature_dynamic_windowsapi(self):\n self.features[\"api_stats\"] = {}\n apistats = self.report.get(\"behavior\", {}).get(\"apistats\", {})\n for d in apistats:\n for e in apistats[d]:\n if e in self.features[\"api_stats\"]:\n self.features[\"api_stats\"][e] += apistats[d][e]\n else:\n self.features[\"api_stats\"][e] = apistats[d][e]",
"def get_last_calls(self):\r\n last_calls = self.last_calls\r\n self.last_calls = []\r\n return last_calls",
"def test_get_stats(self):\n pass",
"def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')",
"def increment_etherscan_calls():\n _increment_counter(\"etherscan_calls\")",
"def run(self) -> None:\n self.urls_list = self._create_api_ulr_list()\n self.results = self._sort_results(\n AsyncGetAPI(\n self.urls_list, self.threads, max_requests=self.max_requests\n ).results\n )",
"def test_05_user_progress(self):\r\n url = '/api/app/1/userprogress'\r\n self.check_limit(url, 'get', 'app')",
"def process_calls():\n try:\n sdplus_api = API(os.environ['SDPLUS_ADMIN'], 'http://sdplus/sdpapi/')\n if not sdplus_api:\n raise KeyError\n except KeyError:\n print('Windows environment varible for \"SDPLUS_ADMIN\" (the API key for sdplus) wasn\\'t found. \\n'\n 'Please correct using \"\"setx SDPLUS_ADMIN <insert your own SDPLUS key here>\" in a command line.')\n sys.exit(1)\n result = []\n all_queues = sdplus_api.request_get_requests('Back Office Third Party/CSC_QUEUE')\n for each_call in all_queues:\n conversations = sdplus_api.request_get_all_conversations(each_call['workorderid'])\n each_call['classification'] = classify_call(conversations)\n each_call['Others involved'] = find_all_people_involved(conversations, each_call['requester'])\n each_call['CSC open/reopen date'] = find_date_csc_opened_call(conversations)\n each_call['CSC severity'] = find_csc_severity(conversations)\n result.append(each_call)\n return result",
"def get_application_api_usage_get(self, applicationId, end, start):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/App/ApiUsage/{applicationId}/\"))",
"def stats(self):\n pass",
"def isThereApiCalls(report):\n with open(report, \"rb\") as r:\n data = json.load(r)\n for i in range(numProcs(report)):\n if len(data[\"behavior\"][\"processes\"][i][\"calls\"]) > 0:\n return True\n else:\n continue\n return False",
"def api_call(func):\n def inner(*args, **kwargs):\n # Rate limit is 'max_request' requests per 'min_time' seconds\n max_requests = 20\n min_time = 120\n\n now = time.time()\n\n # Trim API_CALL_TIMES to calls made recently\n if API_CALL_TIMES:\n while now - API_CALL_TIMES[0] > min_time:\n API_CALL_TIMES.pop(0)\n\n # If 100 or more then wait long enough to make this next request\n if len(API_CALL_TIMES) >= max_requests:\n n = min_time - now + API_CALL_TIMES[0] + 2 # Add 2s leeway...\n print(\"[WARNING] Waiting {} seconds to avoid reaching rate limit...\".format(int(n)))\n time.sleep(n)\n\n API_CALL_TIMES.append(time.time())\n\n return func(*args, **kwargs)\n\n return inner",
"def lookup_api(self, api):\n return parse_limit(self.apis.get(api))",
"def test_success_metrics(self):\n @self.graph.route(self.ns.collection_path, Operation.Search, self.ns)\n def foo():\n return \"\"\n\n response = self.client.get(\"api/v1/foo\")\n assert_that(response.status_code, is_(equal_to(200)))\n\n self.graph.metrics.histogram.assert_called_with(\n \"route\",\n ANY,\n tags=[\n \"endpoint:foo.search.v1\",\n \"backend_type:microcosm_flask\",\n ],\n )\n self.graph.metrics.increment.assert_called_with(\n \"route.call.count\",\n tags=[\n \"endpoint:foo.search.v1\",\n \"backend_type:microcosm_flask\",\n \"classifier:2xx\",\n ],\n )",
"def stats(self):",
"def runAnalytics():\n #gets OAuth from the API\n analytics = get_Analytics_service()\n #get the object return from the API\n #send that object to print out useful fields\n response = get_report(analytics)\n print_response(response)",
"def get_apis(self, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass",
"def __init__(self, api, max_workers=5, count_metrics=None, time_metrics=None,\n gauge_metrics=None, max_call_count=-1, max_time_between_calls=-1):\n super(ApiMetrics, self).__init__(count_metrics=count_metrics, time_metrics=time_metrics,\n gauge_metrics=gauge_metrics, max_call_count=max_call_count,\n max_time_between_calls=max_time_between_calls)\n self._api = api\n self._thread_pool_executor = ThreadPoolExecutor(max_workers=max_workers)",
"def test_get(self, app, data_queues, metricsmock, logs):\n res = self._call(app, ip=self.test_ip, method=\"get\", status=200)\n self.check_response(data_queues, res, \"ok\")\n self.check_queue(data_queues, 0)\n\n metricsmock.assert_incr_once(\n \"request\", tags=[self.metric_path, \"method:get\", \"status:200\"]\n )\n metricsmock.assert_timing_once(\n \"request.timing\", tags=[self.metric_path, \"method:get\"]\n )\n\n log = logs.only_entry\n expected_entry = {\n # accuracy is low for region API fixture, and medium for geolocate\n # see bound_model_accuracy and related tests for direct calculation\n \"accuracy\": logs.only_entry[\"accuracy\"],\n \"accuracy_min\": \"low\",\n \"api_key\": \"test\",\n \"api_path\": self.metric_path.split(\":\")[1],\n \"api_type\": self.metric_type,\n \"blue\": 0,\n \"blue_valid\": 0,\n \"cell\": 0,\n \"cell_valid\": 0,\n \"duration_s\": log[\"duration_s\"],\n \"event\": f\"GET {self.url} - 200\",\n \"fallback_allowed\": False,\n \"has_geoip\": True,\n \"has_ip\": True,\n \"http_method\": \"GET\",\n \"http_path\": self.url,\n \"http_status\": 200,\n \"log_level\": \"info\",\n \"region\": \"GB\",\n \"result_status\": \"hit\",\n \"source_geoip_accuracy\": log[\"accuracy\"],\n \"source_geoip_accuracy_min\": \"low\",\n \"source_geoip_status\": \"hit\",\n \"wifi\": 0,\n \"wifi_valid\": 0,\n }\n if self.metric_type == \"locate\":\n expected_entry[\"api_key_count\"] = 1\n expected_entry[\"api_response_sig\"] = log[\"api_response_sig\"]\n assert log == expected_entry",
"def request_more_resources():\n logger.info(\"NEED MORE RESOURCES!!!!\")"
]
| [
"0.5769262",
"0.5703401",
"0.55801475",
"0.55431765",
"0.55145115",
"0.5480181",
"0.5457197",
"0.5449271",
"0.5439773",
"0.5354681",
"0.53495103",
"0.53266394",
"0.5295815",
"0.52953917",
"0.5255003",
"0.5252155",
"0.525124",
"0.5247804",
"0.52282375",
"0.52236444",
"0.52219564",
"0.52192247",
"0.521339",
"0.5209942",
"0.52091086",
"0.51543206",
"0.5146934",
"0.5142275",
"0.5128685",
"0.5124135"
]
| 0.6157702 | 0 |
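The stats() record above reads a Redis sorted set with zrevrangebyscore(..., withscores=True), which yields (member, score) pairs in descending score order, and then repackages them as a list of dicts. That reshaping step can be exercised without a Redis server; the sample data below is hypothetical.

def to_count_dicts(rows):
    # rows mirror the redis-py response shape: [("/stats", 13.0), ("/api/1/2/3/4/5", 6.0)]
    return [{"count": score, "url": url} for url, score in rows]

if __name__ == "__main__":
    sample = [("/stats", 13.0), ("/api/1/2/3/4/5", 6.0)]
    print(to_count_dicts(sample))
    # [{'count': 13.0, 'url': '/stats'}, {'count': 6.0, 'url': '/api/1/2/3/4/5'}]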
Log the API request in redis. | def log_api():
try:
redis.zincrby(REDIS_LOG_KEY_NAME, 1, request.path)
except RedisError as exc:
return exc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rest_api_log(self):\n with self.resource_lock:\n pass",
"def log_request(self, r):\n\n token = r.headers.get(self.header, None)\n r.token = token\n self.requests.append(r)\n if r.token:\n self.log.debug('[%s] %s', token or '/', r.url)",
"def _log_request(self):\n log = self.server.log\n if log:\n if hasattr(log, \"info\"):\n log.info(self.format_request() + '\\n')\n else:\n log.write(self.format_request() + '\\n')",
"def log():\n data = {}\n log = {}\n log['dia'] = date.today().strftime(\"%d/%m/%Y\")\n log['info'] = ('Rooms IP: %s %s %s')%(request.remote_addr,request.method, request.url)\n data['data'] = log\n try:\n r = requests.post(uri, json=data)\n except requests.exceptions.RequestException as e:\n print(e)\n print(\"\\n\\nThe microservice Log is unvailable. The Log is %s.\"%(log['info']))\n else:\n if r.status_code == 200:\n print(\"Register Log was a success\")\n else:\n print(\"Register Log was an unsuccess\")",
"def __call__(self, request):\n request.start_time = time.time()\n\n response = self.get_response(request)\n\n log_data = self.extract_log_info(request=request, response=response)\n logger.info(log_data)\n\n return response",
"def log_request(self, key, path, headers, body):\n\n if not body:\n body = {}\n # Build a dict with key, headers and body.\n now = datetime.datetime.now().isoformat()\n data = {'request_id': key, 'body': body, 'path': path, 'created': now}\n for k, v in headers.items():\n data[k] = v\n # Put to Dynamodb as a separated thread.\n threading.Thread(target=put_to_dynamodb, args=(data,)).start()",
"def log(msg):\n\tfrom http_request import req\n\tif not req: return\n\t\t\n\tif not req.out.get('_log'):\n\t\treq.out['_log'] = []\n\treq.out['_log'].append(msg)",
"def emit(self, record):\n log_entry = self.format(record)\n try: \n requests.post(self.host+self.url, log_entry,headers={\"Content-type\": \"application/json\"}).content\n except Exception as e:\n if self.debug:\n print(e)",
"def onRequestStart(self, api, request):\n logging.info('Request start ({})'.format(request))",
"def api():\n query = dict(request.args)\n socket_io.emit('log', dict(data=str(query)), broadcast=True)\n return jsonify(dict(success=True, message='Received'))",
"def log(self, *args):\n self._check_private_key(\"log data\")\n params = {'private_key': self.privateKey}\n params.update(dict((k, self._encoder.serialize(v))\n for k, v in zip(self.fields, args)))\n response = self._post(self.inputUrl(), params=params)\n\n self._last_headers = response.headers\n self._stats = None",
"def log_request(task_request, request):\n msg = \"{0.method} {0.url}: {0.body}\".format(request)\n log_info(task_request, msg)",
"def log(self):\n\n\t\theader_dict = dict(request.headers)\n\n\t\ttry:\n\t\t\ttracker_id = header_dict[\"tracker_id\"]\n\t\texcept Exception:\n\t\t\ttracker_id = None\n\t\t\n\t\ttry:\n\t\t\tuser_agent = header_dict[\"User-Agent\"]\n\t\texcept Exception:\n\t\t\tuser_agent = None\n\n\t\ttry:\n\t\t\tlanguage = header_dict[\"Accept-Language\"]\n\t\texcept Exception:\n\t\t\tlanguage = None\n\n\t\ttry:\n\t\t\treferer = header_dict[\"Referer\"]\n\t\texcept Exception:\n\t\t\treferer = None\n\n\t\ttry:\n\t\t\torigin = header_dict[\"Origin\"]\n\t\texcept Exception:\n\t\t\torigin = None\n\n\t\ttry:\n\t\t\tjson_data = request.json\n\t\texcept Exception:\n\t\t\tjson_data = None\n\n\t\ttry:\n\t\t\tplatform = request.user_agent.platform.title()\n\t\texcept Exception:\n\t\t\tplatform = None\n\n\t\ttry:\n\t\t\tbrowser = request.user_agent.browser.title()\n\t\texcept Exception:\n\t\t\tbrowser = None\n\n\t\ttry:\n\t\t\tauth_header_token = header_dict[\"Authorization\"].split(\" \")[1]\n\t\texcept Exception:\n\t\t\tauth_header_token = None\n\t\t\n\t\t## If set to run before a request: This is the default setting\n\t\tif self.pre_request:\n\t\t\[email protected]_request()\n\t\t\tdef run():\n\t\t\t\t## If the path accessed is in the do_not_log list, it is skipped\n\t\t\t\tif request.path in self.do_not_log:\n\t\t\t\t\treturn\n\t\t\t\t## If the path accessed is not in the do_not_log list, it is posted\n\t\t\t\telse:\n\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\"error\": None,\n\t\t\t\t\t\t\"stack_trace\": None,\n\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\"status_code\": 200, ## Assumed to be 200 due to the nature of the function\n\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t}\n\n\t\t\t\t\tself.startPost(post_data)\n\n\t\t\t\t\treturn\n\n\t\t\treturn run\n\t\t\n\t\t## If set to as a wrapper to a function\n\t\telse:\n\t\t\tdef log_decorator(func):\n\n\t\t\t\t@wraps(func)\n\t\t\t\tdef execute(*args, **kwargs):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tresult = func(*args, **kwargs)\n\n\t\t\t\t\t\tresult_response = make_response(result)\n\n\t\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\t\"error\": None,\n\t\t\t\t\t\t\t\"stack_trace\": None,\n\t\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\t\"status_code\": result_response.status_code,\n\t\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tself.startPost(post_data)\n\n\t\t\t\t\texcept 
Exception as e:\n\t\t\t\t\t\tresult = func(*args, **kwargs)\n\t\t\t\t\t\t\n\t\t\t\t\t\ttrace = traceback.format_exc()\n\n\t\t\t\t\t\tkwargs = {\n\t\t\t\t\t\t\t\"trace\": trace,\n\t\t\t\t\t\t\t\"exception\": str(e)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\n\t\t\t\t\t\tpost_data = {\n\t\t\t\t\t\t\t\"error\": str(e),\n\t\t\t\t\t\t\t\"stack_trace\": trace,\n\t\t\t\t\t\t\t\"method\": request.method,\n\t\t\t\t\t\t\t\"source_ip\": request.remote_addr,\n\t\t\t\t\t\t\t\"url\": request.url,\n\t\t\t\t\t\t\t\"status_code\": 500,\n\t\t\t\t\t\t\t\"headers\": str(header_dict),\n\t\t\t\t\t\t\t\"user_agent\": user_agent,\n\t\t\t\t\t\t\t\"language\": language,\n\t\t\t\t\t\t\t\"platform\": platform,\n\t\t\t\t\t\t\t\"browser\": browser,\n\t\t\t\t\t\t\t\"referer\": referer,\n\t\t\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\t\t\"auth_header\": auth_header_token,\n\t\t\t\t\t\t\t\"access_time\": datetime.now().strftime(\"%A, %d %B %Y %H:%M:%S\"),\n\t\t\t\t\t\t\t\"logging_access_key\": self.accessKey,\n\t\t\t\t\t\t\t\"json\": json_data,\n\t\t\t\t\t\t\t\"request_params\": str(dict(request.args))\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tself.startPost(post_data)\n\t\t\t\t\t\n\t\t\t\t\treturn result\n\t\t\t\t\n\t\t\t\treturn execute\n\t\t\t\n\t\t\treturn log_decorator",
"def emit(self, record):\n data = getattr(record, 'synchrolog', {})\n if not data:\n return\n\n url = data.pop('url', None)\n\n if not url:\n return\n\n headers = {'Authorization': f'Basic {self.access_token}'}\n response = requests.post(url=url, json=data, headers=headers)\n if response.status_code >= 400:\n print('Could not send logging info to synchrolog server\\n\\n', response.text)",
"def dk_redis(request):\n return _dk_redis(request)",
"def _log_request(res: SpamResult) -> None:\n _log.info(f\"requestId=[{request.id}] result=[{res.label}] reason=[{res.reason}]\")",
"def log(self, message):",
"def LogData(\n serverName: str, pubIp: str, apiName: str, result,\n request: dict):\n log_id = str(uuid.uuid4())\n dateTime = str(datetime.today())\n if \"Image\" in request:\n request['Image'] = str(request['Image'])\n if \"File\" in request:\n request['File'] = str(request['File'])\n\n paramData = json.dumps(request)\n errorInfo = sys.exc_info()\n errorMessage = \"\"\n if errorInfo[2] is not None:\n errorMessage = f'Error at lineNumber: {str(errorInfo[2].tb_lineno)} {str(errorInfo[0])} {str(errorInfo[1])}'\n info = {\n \"log_Id\": log_id, \"ServerName\": serverName, \"DateTime\": dateTime,\n \"PublicIP\": pubIp, \"APIName\": apiName,\n \"Result\": errorMessage + str(result), \"ParameterData\": str(paramData)\n\n }\n logger = logging.getLogger(__name__)\n logger.info(f'{info}')\n return log_id",
"def accessed(self, identifier, **kwargs):\r\n # Do the import here, instead of top-level, so that the model is\r\n # only required when using this throttling mechanism.\r\n from delicious_cake.models import ApiAccess\r\n super(CacheDBThrottle, self).accessed(identifier, **kwargs)\r\n # Write out the access to the DB for logging purposes.\r\n ApiAccess.objects.create(\r\n identifier=identifier,\r\n url=kwargs.get('url', ''),\r\n request_method=kwargs.get('request_method', ''))",
"def on_sync(self):\r\n self.log()",
"def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]",
"def log(self, message):\n print(\"Server#{}\".format(message))\n if self.log_queue:\n self.log_queue.put()",
"def log(self, obj, action):\n action_dict = {'time': time.time(),\n 'action': action}\n self.log_data[obj.get_obj_id()]['actions'].append(action_dict)",
"async def _write_preflight_log(self, command_name, command):\n logging.info('Action submitted: [%s] [%s]', command_name,\n json.dumps(command))",
"def log_request(self, code='-', size='-'):\n pass",
"def log_request(req: 'flask_request', res: str) -> None:\n\n _DBCONFIG = {'user': 'vsearch',\n 'password': 'vsearchpasswd',\n 'host': 'hfrey.de',\n 'database': 'vsearchlogDB'}\n\n conn = mysql.connector.connect(**_DBCONFIG)\n cursor = conn.cursor()\n\n _SQL = \"\"\"insert into log (phrase, letters, ip, browser_string, results)\n values\n (%s, %s, %s, %s, %s)\"\"\"\n cursor.execute(_SQL, (req.form['phrase'], req.form['letters'], req.remote_addr, req.user_agent.browser, res, ))\n\n cursor.close()\n conn.commit()\n conn.close()",
"def log(self, message):\n #logs.logger.debug(\"asyncore log: %s\" % message)",
"def log(self, message):\n #logs.logger.debug(\"asyncore log: %s\" % message)\n pass",
"def post(host):\n redis.setex('dispatcher',host,60)\n timer = threading.Timer(20.0, post, args=[host])\n timer.daemon = True\n timer.start()",
"def log_request(self, code='-', size='-'):\n response_time = self.request_time()\n if hasattr(code, 'value'):\n code = code.value\n self.log_message(\n '\"%s\" %s %s %.2fms',\n self.requestline, str(code), str(size), response_time,\n )\n logger.debug(\n \"method=%s url=%s status=%s handler=%s\"\n \" response_time=%s service=web\",\n self.http_method, self.log_data['url'], code,\n self.log_data['handler'], response_time,\n )"
]
| [
"0.6821955",
"0.64709324",
"0.6096815",
"0.5952896",
"0.59290445",
"0.59065264",
"0.5817935",
"0.5794073",
"0.5787633",
"0.5737805",
"0.57194245",
"0.5627782",
"0.5594348",
"0.5563636",
"0.5562566",
"0.55471605",
"0.545164",
"0.5435416",
"0.54187465",
"0.53533846",
"0.53525513",
"0.5319388",
"0.5305309",
"0.5302529",
"0.5288957",
"0.52642554",
"0.526422",
"0.5243414",
"0.5242887",
"0.5240078"
]
| 0.80186987 | 0 |
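The log_api() record above increments a per-path counter inside a Redis sorted set via zincrby(key, 1, request.path). A plain collections.Counter mirrors that bookkeeping in-process, which is enough to sketch what the endpoint accumulates without a Redis server; the names and sample paths below are illustrative only.

from collections import Counter

hit_counter = Counter()  # stand-in for the Redis sorted set

def record_hit(path: str) -> None:
    hit_counter[path] += 1  # analogous to redis.zincrby(REDIS_LOG_KEY_NAME, 1, path)

if __name__ == "__main__":
    for path in ["/stats", "/api/1/2/3/4/5", "/stats", "/stats"]:
        record_hit(path)
    # most_common() plays the role of zrevrangebyscore: descending by count.
    print(hit_counter.most_common())  # [('/stats', 3), ('/api/1/2/3/4/5', 1)]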
The decorator prevents print statements from printing messages to the console. | def disable_print_statements_on_console(func):
@wraps(func)
def wrap(*args, **kw):
suppress_text = io.StringIO()
sys.stdout = suppress_text
result = func(*args, **kw)
sys.stdout = sys.__stdout__
return result
return wrap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def suppressMessages():\n dislin.unit(0)",
"def suppress(self):\n pass",
"def disable_console():\n logger.removeHandler(CONSOLE)",
"def nologger(*args, **kwargs):\n return",
"def _disable_decorator(msg):\n def decorator(func):\n @functools.wraps(func)\n def _wrapper(self, *args, **kwargs):\n raise RuntimeError(msg.format(func.__name__))\n _wrapper.__doc__ = None\n return _wrapper\n return decorator",
"def repl_print_statements():\n pass",
"def suppress(self):\n with open(os.devnull, \"w\") as devnull:\n osout = sys.stdout\n sys.stdout = devnull\n try:\n yield\n finally:\n sys.stdout = osout",
"def print_disabled(*vargs, **kwargs):\n _do_print_color(*vargs, colorcode = 30, **kwargs)",
"def blockPrint():\n sys.stdout = open(os.devnull, 'w')",
"def suppress_stdout():\n original_stdout = sys.stdout\n sys.stdout = open(os.devnull, 'w')\n yield\n sys.stdout.close()\n sys.stdout = original_stdout",
"def DEBUG(*args, **kwargs):\n if __name__ != \"__main__\":\n print(*args, **kwargs)",
"def dummy_method_silent(self):\n\n pass",
"def valid_console(func):\n return lambda * args, **kwargs: \\\n _check_console(lambda r: None,\n 'console', func, *args, **kwargs)",
"def clear_console(cls):\n print('\\n' * 200)",
"def hook_print():\n sys.stdout = PrintHook()",
"def debug(func):\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n if args and not kwargs:\n print(\"~ input of {}: args: {}\".format(func.__name__, args))\n elif not args and kwargs:\n print(\"~ input of {}: kwargs: {}\".format(func.__name__, kwargs))\n elif args and kwargs:\n print(\"~ input of {}: args: {}, kwargs: {}\".format(func.__name__, args, kwargs))\n else:\n print(\"~ input of {}: NO_ARGS\".format(func.__name__))\n output = func(*args, **kwargs) # stores the result of the function\n print(\"~ output of {}:\".format(func.__name__), output)\n return output\n\n return decorated",
"def hide_messages():\n\n print(\"Keep uncertainty data?\")\n print(\"NewDatabase(..., keep_uncertainty_data=True)\")\n print(\"\")\n print(\"Hide these messages?\")\n print(\"NewDatabase(..., quiet=True)\")",
"def disableCLangLogger(self):\n pass",
"def warn():\n pass",
"def __call__(self, func):\n @wraps(func)\n def suppressed_func(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return suppressed_func",
"def stdout_on(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwds):\n orig = sys.stdout\n sys.stdout = sys.__stdout__\n try:\n return fn(*args, **kwds)\n finally:\n sys.stdout = orig\n\n return wrapper",
"def _redefine_print(is_main):\n import builtins as __builtin__\n\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop(\"force\", False)\n if is_main or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print",
"def printed(method):\n\t\tdef wrapper(cls, *args):\n\t\t\tif cls.verbose:\n\t\t\t\treturn method(cls, *args)\n\t\treturn wrapper",
"def trace(filler):\n @decorator\n def dec(func):\n def wrapper(*args, **kwargs):\n indent = filler * wrapper.level\n arguments = ', '.join(str(x) for x in args)\n print('{} --> {}({})'.format(indent, func.__name__, arguments))\n wrapper.level += 1\n\n result = func(*args, **kwargs)\n print('{} <-- {}({}) == {}'.format(indent, func.__name__, arguments, result))\n wrapper.level -= 1\n return result\n wrapper.level = 0\n return wrapper\n return dec",
"def suppress(self):\n return self",
"def printMe():\n print(\"meeeeeee\")",
"def _printable(self):\n pass",
"def try_decoration(self, func):\n while True:\n try:\n func()\n break\n except:\n print(\"\")",
"def disable_logging():\n logging.disable(50)\n yield\n logging.disable(0)",
"def printMe():\n\n print(\"meeeeeee\")"
]
| [
"0.639712",
"0.63451016",
"0.62081826",
"0.61555076",
"0.61396784",
"0.6103333",
"0.6075437",
"0.6004607",
"0.5957251",
"0.5941743",
"0.5940628",
"0.591935",
"0.5900067",
"0.58858734",
"0.5809405",
"0.5779688",
"0.5759825",
"0.5730924",
"0.5709933",
"0.5699943",
"0.5698221",
"0.56823653",
"0.5681862",
"0.56751424",
"0.5645871",
"0.5633961",
"0.5632359",
"0.56241435",
"0.56191033",
"0.5608117"
]
| 0.80535823 | 0 |
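A usage sketch for the stdout-suppressing decorator record above. It achieves the same effect with contextlib.redirect_stdout from the standard library, which also restores sys.stdout if the wrapped function raises; the decorator and function names below are illustrative, not from the dataset.

import contextlib
import io
from functools import wraps

def silence_stdout(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Redirect stdout to an in-memory buffer for the duration of the call.
        with contextlib.redirect_stdout(io.StringIO()):
            return func(*args, **kwargs)
    return wrapper

@silence_stdout
def noisy(x):
    print("this line is suppressed")
    return x * 2

if __name__ == "__main__":
    print(noisy(21))  # prints 42; the inner print never reaches the console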
This function is a helper to create a `PipelineML` object directly from two Kedro `Pipelines` (one for training and one for inference). | def pipeline_ml_factory(
training: Pipeline,
inference: Pipeline,
input_name: str = None,
conda_env: Optional[Union[str, Path, Dict[str, Any]]] = None,
model_name: Optional[str] = "model",
model_signature: Union[ModelSignature, str, None] = "auto",
**kwargs
) -> PipelineML:
pipeline = PipelineML(
nodes=training.nodes,
inference=inference,
input_name=input_name,
conda_env=conda_env,
model_name=model_name,
model_signature=model_signature,
**kwargs
)
return pipeline | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_pipeline(self, train: LAMLDataset) -> LAMLTransformer:\n raise NotImplementedError",
"def build_own_pipeline() -> Pipeline:\n nn_pipeline = None\n\n nn_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MLPClassifier()) \n ])\n \n return nn_pipeline",
"def __build_ml_pipeline(self, clf: MultiOutputClassifier) -> Pipeline:\r\n pipeline = Pipeline([\r\n ('features', FeatureUnion(\r\n [('text', Pipeline(\r\n [('text_field_extractor', \r\n basic_utils.TextFieldExtractor('message')), \r\n #('vect', CountVectorizer(tokenizer=clean_text)),\r\n #('tfidf', TfidfTransformer())\r\n ('tfidf', TfidfVectorizer(tokenizer=basic_utils.tokenize, \r\n min_df=.0025, max_df=0.25, \r\n ngram_range=(1,2)))\r\n ])),\r\n \r\n ('numerics', FeatureUnion(\r\n [('text_len', \r\n Pipeline([('text_len_extractor', \r\n basic_utils.NumericFieldExtractor('text_len')), \r\n ('text_len_scaler', StandardScaler())\r\n ])),\r\n ('punt_perc', \r\n Pipeline([('punt_perc_extractor', \r\n basic_utils.NumericFieldExtractor('punt_perc')), \r\n ('punt_perc_scaler', StandardScaler())\r\n ]))\r\n ])),\r\n ('starting_verb', basic_utils.PosFieldExtractor('starting_verb_flag'))\r\n ])),\r\n \r\n ('clf', clf)\r\n ])\r\n \r\n return pipeline",
"def pipeline():\n\n test_pipeline = (Pipeline()\n .init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline",
"def create_pipeline(pipeline_name: Text, \n pipeline_root: Text, \n dataset_name: Text,\n train_steps: data_types.RuntimeParameter,\n eval_steps: data_types.RuntimeParameter,\n accuracy_threshold: data_types.RuntimeParameter,\n ai_platform_training_args: Dict[Text, Text],\n ai_platform_serving_args: Dict[Text, Text],\n beam_pipeline_args: List[Text],\n model_regisrty_uri: Text,\n enable_cache: Optional[bool] = False) -> pipeline.Pipeline:\n\n # Dataset, table and/or 'where conditions' can be passed as pipeline args.\n query=sql_utils.generate_source_query(dataset_name=dataset_name)\n \n # Brings data into the pipeline from BigQuery.\n example_gen = tfx.components.BigQueryExampleGen(\n query=query\n )\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = tfx.components.StatisticsGen(\n input_data=example_gen.outputs.examples)\n\n # Import schema from local directory.\n schema_importer = ImporterNode(\n instance_name='RawSchemaImporter',\n source_uri=RAW_SCHEMA_DIR,\n artifact_type=Schema,\n )\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = tfx.components.ExampleValidator(\n stats=statistics_gen.outputs.output, \n schema=schema_importer.outputs.result\n )\n\n # Performs transformations and feature engineering in training and serving.\n transform = tfx.components.Transform(\n input_data=example_gen.outputs.examples,\n schema=schema_importer.outputs.result,\n module_file=TRANSFORM_MODULE_FILE\n )\n\n\n # Get the latest blessed model for model validation.\n latest_model_resolver = tfx.components.ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing)\n )\n \n # Train and save model for evaluation and serving.\n trainer = tfx.components.Trainer(\n# custom_executor_spec=executor_spec.ExecutorClassSpec(\n# ai_platform_trainer_executor.GenericExecutor),\n custom_executor_spec=executor_spec.ExecutorClassSpec(\n trainer_executor.GenericExecutor),\n module_file=TRAIN_MODULE_FILE,\n transformed_examples=transform.outputs.transformed_examples,\n schema=schema_importer.outputs.result,\n transform_output=transform.outputs.transform_output,\n base_model=latest_model_resolver.outputs.model,\n train_args={'num_steps': train_steps},\n eval_args={'num_steps': eval_steps},\n custom_config={'ai_platform_training_args': ai_platform_training_args}\n )\n\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_evaluator = tfx.components.Evaluator(\n examples=example_gen.outputs.examples,\n model=trainer.outputs.model,\n baseline_model=latest_model_resolver.outputs.model,\n eval_config=helper.get_eval_config()\n )\n \n # Use a custom AccuracyModelValidator component to validate the model.\n model_validator = AccuracyModelValidator(\n eval_results=model_evaluator.outputs.output,\n model=trainer.outputs.model,\n accuracy_threshold=accuracy_threshold,\n slice_accuracy_tolerance=0.15,\n )\n\n# # Checks whether the model passed the validation steps and pushes the model\n# # to its destination if check passed.\n# pusher = tfx.components.Pusher(\n# custom_executor_spec=executor_spec.ExecutorClassSpec(\n# ai_platform_pusher_executor.Executor),\n# model_export=trainer.outputs.output,\n# model_blessing=model_evaluator.outputs.blessing,\n# #model_blessing=model_validator.outputs.blessing,\n# custom_config={'ai_platform_serving_args': ai_platform_serving_args}\n# 
)\n \n register = tfx.components.Pusher(\n model=trainer.outputs.model,\n model_blessing=model_validator.outputs.blessing,\n #model_blessing=model_evaluator.outputs.blessing,\n push_destination=tfx.proto.pusher_pb2.PushDestination(\n filesystem=tfx.proto.pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.join(model_regisrty_uri, pipeline_name)))\n )\n \n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, \n statistics_gen, \n schema_importer, \n validate_stats,\n latest_model_resolver,\n transform,\n trainer, \n model_evaluator, \n model_validator, \n #pusher\n register\n ],\n enable_cache=enable_cache,\n beam_pipeline_args=beam_pipeline_args)",
"def auto_ml():\r\n # Reading from file\r\n my_data = my_reader(config.filename, separ=config.file_separ)\r\n\r\n # Binary and Unary columns search\r\n is_binary_list = is_binary(my_data)\r\n is_unary_list = is_unary(my_data)\r\n\r\n # Time columns search\r\n is_time_list = is_time(my_data)\r\n\r\n # To dummy\r\n my_data = to_dummies(my_data)\r\n\r\n # Train-test split\r\n train_df, test_df = \\\r\n my_train_test_split(my_data, act_test_size=config.test_size)\r\n\r\n # Pure numbers will be the input variables\r\n input_vars = to_pure_numbers(my_data)\r\n\r\n # Choosing if it is a regression or classification\r\n global regression, classification\r\n regression, classification = guess_goal(my_data, config.target)\r\n\r\n # Modelling and building the pipeline\r\n n_neighbors = 15\r\n x_df = train_df[input_vars]\r\n if regression:\r\n pipe_1 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', LinearRegression(fit_intercept=True))])\r\n pipe_2 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model',\r\n neighbors.KNeighborsRegressor(n_neighbors,\r\n weights='distance'))])\r\n pipe_3 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.BayesianRidge())])\r\n pipe_4 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.SGDRegressor())])\r\n pipe_5 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.ElasticNet())])\r\n pipe_6 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.Ridge())])\r\n pipe_7 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.Lasso())])\r\n pipe_8 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', RandomForestRegressor(max_depth=2,\r\n random_state=0,\r\n n_estimators=100))])\r\n pipe_dict = {0: 'LinearRegression',\r\n 1: 'KNeighborsRegressor',\r\n 2: 'BayesianRidge',\r\n 3: 'SGDRegressor',\r\n 4: 'ElasticNet',\r\n 5: 'Ridge',\r\n 6: 'Lasso',\r\n 7: 'RandomForestRegressor'}\r\n\r\n if classification:\r\n pipe_1 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', LogisticRegression(random_state=42))])\r\n pipe_2 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n 
('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model',\r\n neighbors.KNeighborsClassifier(n_neighbors))])\r\n pipe_3 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', RandomForestClassifier(n_estimators=100,\r\n max_depth=2,\r\n random_state=0))])\r\n pipe_4 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', linear_model.SGDClassifier())])\r\n pipe_5 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', MLPClassifier())])\r\n pipe_6 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', GradientBoostingClassifier())])\r\n pipe_7 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', GaussianNB())])\r\n pipe_8 = Pipeline([('missing', MissingValueHandle()),\r\n ('duplicated', DuplicatedRowHandle()),\r\n ('discretize', Digitize()),\r\n ('standardize', Standardize()),\r\n ('minmaxscaler', MyMinMaxScaler()),\r\n ('model', SVC(gamma='auto'))])\r\n pipe_dict = {0: 'LogisticRegression',\r\n 1: 'KNeighborsClassifier',\r\n 2: 'RandomForestClassifier',\r\n 3: 'SGDClassifier',\r\n 4: 'MLPClassifier',\r\n 5: 'GradientBoostingClassifier',\r\n 6: 'GaussianNB',\r\n 7: 'SVC'}\r\n\r\n # List of pipelines\r\n pipelines = [pipe_1, pipe_2, pipe_3, pipe_4, pipe_5, pipe_6, pipe_7, pipe_8]\r\n\r\n # Fit the pipelines\r\n for pipe in pipelines:\r\n pipe.fit(x_df, train_df[config.target])\r\n\r\n # Is there outlier\r\n outlier_bool = is_outlier(x_df)\r\n\r\n corr_df = x_df.corr()\r\n\r\n # Open new file\r\n result_path = './test_eval/Result_params_' +\\\r\n str(config.filename.split(\"/\")[-1].split(\".\")[0]) + '.txt'\r\n result_file = open(result_path, 'w')\r\n result_file.write(\"Filename: \" + str(config.filename) + '\\n')\r\n result_file.write(\"Target: \" + str(config.target) + '\\n')\r\n if regression:\r\n result_file.write(\"Prediction type: Regression\" + '\\n')\r\n else:\r\n result_file.write(\"Prediction type: Classification\" + '\\n')\r\n result_file.write(\"Test size: \" + str(config.test_size*100) + \"%\" + '\\n')\r\n result_file.write(\"Model input columns: \" + str(input_vars) + '\\n')\r\n result_file.write(\"Used preparations: \" + '\\n')\r\n if config.missing_bool:\r\n result_file.write(\"Missing value handle (\" +\r\n str(config. 
missing_value_handle) +\r\n \"), \")\r\n if config.min_scaler_bool:\r\n result_file.write(\"Min scaling, \")\r\n if config.standardize_bool:\r\n result_file.write(\"Standardize, \")\r\n if config.to_dummies:\r\n result_file.write(\"To dummies\")\r\n result_file.write('\\n' + \"Discretize columns: \" +\r\n str(config.discretize) + '\\n')\r\n result_file.write(\"Binary columns: \" + str(is_binary_list) + '\\n')\r\n result_file.write(\"Unary columns: \" + str(is_unary_list) + '\\n')\r\n result_file.write(\"Time columns: \" + str(is_time_list) + '\\n')\r\n if outlier_bool:\r\n result_file.write(\"There is outlier in the data.\" + '\\n')\r\n\r\n # Evaluation\r\n result_df = pd.DataFrame()\r\n result_cols = []\r\n for idx, val in enumerate(pipelines):\r\n result_df = pd.concat([result_df,\r\n my_evaluation(val.predict(test_df[input_vars]),\r\n test_df[config.target])])\r\n result_cols.append(pipe_dict[idx])\r\n\r\n result_df.index = result_cols\r\n result_file.close()\r\n\r\n with pd.ExcelWriter(\"./test_eval/Evaluation_\"\r\n + str(config.filename.split(\"/\")[-1].split(\".\")[0])\r\n + \".xlsx\") as writer:\r\n if regression:\r\n result_df.to_excel(writer, sheet_name=\"Regression\")\r\n else:\r\n result_df.to_excel(writer, sheet_name=\"Classification\")\r\n corr_df.to_excel(writer, sheet_name=\"Correlation\")",
"def build_own_pipeline() -> Pipeline:\n clf = svm.LinearSVC(C=2, loss='hinge')\n vect = TfidfVectorizer(ngram_range=(1, 2))\n\n pipeline = None\n ##### Write code here #######\n pipeline = Pipeline([\n ('vect', vect),\n ('tfidf', TfidfTransformer()),\n ('clf', clf)\n ])\n ##### End of your work ######\n return pipeline",
"def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe",
"def create(pdef):\n from sklearn.pipeline import Pipeline\n return [Pipeline(p) for p in pdef]",
"def _make_pipeline(preprocessors, classifier):\n if isinstance(preprocessors, list):\n # support only preprocessing of lenght 2\n return make_pipeline(preprocessors[0], preprocessors[1], classifier)\n if preprocessors is None:\n return make_pipeline(classifier)\n\n return make_pipeline(preprocessors, classifier)",
"def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe",
"def create_pipelines_lingspam():\n stop = ('stop', StopWordRemovalTransformer())\n lemma = ('lemma', LemmatizeTransformer())\n binz = ('binarizer', CountVectorizer())\n we = ('document embedding', DocEmbeddingVectorizer())\n sel = ('fsel', SelectKBest(score_func=mutual_info_classif, k=100))\n clf = ('cls', BernoulliNB()) # Binary features in the original paper. \n return Pipeline([binz, sel, clf]), \\\n Pipeline([stop, binz, sel, clf]), \\\n Pipeline([lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, we, sel, clf])",
"def build_model():\n pipeline = Pipeline([\n ('vectorizer', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n # (), # Feature engineering (word2vec/GloVe)\n (\"clf\", MultiOutputClassifier(RandomForestClassifier(n_estimators=100), n_jobs=-1))\n ])\n\n return pipeline",
"def set_pipeline(self):\n feateng_steps = self.kwargs.get('feateng', ['runtime', 'country', 'language',\n 'genre', 'age', 'rated', 'released',\n 'writer', 'director', 'actors', 'production'])\n \n pipe_runtime_features = Pipeline([\n ('runtime', SimpleImputer(strategy='constant', fill_value=\"0\")),\n ('runtime_encoder', CleanRuntimeEncoder()),\n ('runtime_scaler', StandardScaler())])\n \n pipe_country_features = Pipeline([\n ('country', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('country_encoder', CleanCountryEncoder())])\n \n pipe_language_features = Pipeline([\n ('language', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('language_encoder', CleanLanguageEncoder())])\n \n pipe_genre_features = Pipeline([\n ('genre', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('genre_transformer', FunctionTransformer(np.reshape, kw_args={'newshape':-1})), \n ('genre_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_age_features = Pipeline([\n ('age', SimpleImputer(strategy='median')),\n ('age_enconder', CleanAgeEncoder())])\n \n pipe_rated_features = Pipeline([\n ('rated', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('rated_encoder', CleanRatedEncoder()),\n ('rated_ohe', OneHotEncoder(handle_unknown='ignore'))])\n \n pipe_released_features = Pipeline([\n ('released', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('released_encoder', CleanReleasedEncoder()),\n ('released_ohe', OneHotEncoder(handle_unknown='ignore'))])\n\n pipe_writer_features = Pipeline([\n ('writer', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('writer_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('writer_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_director_features = Pipeline([\n ('director', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('director_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('director_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_actors_features = Pipeline([\n ('actors', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('actors_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('actors_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_production_features = Pipeline([\n ('production', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('production_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('production_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n # define default feature engineering blocks\n feateng_blocks = [\n ('runtime', pipe_runtime_features, ['Runtime']),\n ('country', pipe_country_features, ['Country']),\n ('genre', pipe_genre_features, ['Genre']),\n ('age', pipe_age_features, ['Year']),\n ('rated', pipe_rated_features, ['Rated']),\n ('released', pipe_released_features, ['Released']),\n ('writer', pipe_writer_features, ['Writer']),\n ('director', pipe_director_features, ['Director']),\n ('actors', pipe_actors_features, ['Actors']),\n ('language', pipe_language_features, ['Language']),\n ('production', pipe_production_features, ['Production'])]\n \n # filter out some blocks according to input parameters\n for block in feateng_blocks:\n if block[0] not in feateng_steps:\n feateng_blocks.remove(block)\n\n features_encoder = 
ColumnTransformer(feateng_blocks,\n n_jobs=None,\n remainder='drop')\n\n self.pipeline = Pipeline(steps=[\n ('features', features_encoder),\n ('rgs', self.get_estimator())])",
"def setup_ml():\n # load vocabulary\n vocab = open(f\"{VOCABULARY_FILE}\", \"rb\")\n vocab = pickle.load(vocab)\n\n # transformer to preprocess images\n transform_test = transforms.Compose([ \n transforms.Resize(256), \n transforms.RandomCrop(224), \n transforms.RandomHorizontalFlip(), \n transforms.ToTensor(), \n transforms.Normalize((0.485, 0.456, 0.406), \n (0.229, 0.224, 0.225))])\n\n # Initialize the encoder and decoder, and set each to inference mode.\n encoder = EncoderCNN(EMBED_SIZE)\n encoder.eval()\n decoder = DecoderRNN(EMBED_SIZE, HIDDEN_SIZE, VOCAB_SIZE)\n decoder.eval()\n\n # load encoder\n encoder.load_state_dict(\n torch.load(\n os.path.join('./models', ENCODER_FILE),\n map_location=torch.device('cpu')\n )\n )\n\n # load decoder\n decoder.load_state_dict(\n torch.load(\n os.path.join('./models', DECODER_FILE),\n map_location=torch.device('cpu')\n )\n )\n print(\"\\n-- Model components were imported succesfully! -- \\n\")\n return transform_test, encoder, decoder, vocab",
"def similar_bonds_pipeline():\n pipeline = Pipeline(\n steps=[\n ('scaler', StandardScaler()),\n #('encoder', OneHotEncoder()),\n ('pca', PCA(n_components=3)),\n ('knn', KNN()),\n ]\n )\n return pipeline",
"def get_pipelines(emo_best_model_dict, taskname, anal):\r\n parent_dir = Path.cwd().parent\r\n task_name = 'class_' #default\r\n analysis = 'model_anal_' # default\r\n\r\n if taskname == 'r':\r\n task_name = 'reg_'\r\n if anal != 'model':\r\n analysis = anal + '_anal_'\r\n\r\n prev_name = task_name + analysis\r\n emo_pipeline_dict = {}\r\n for emotion, best_model_prop in emo_best_model_dict.items(): # dataset, classifier, vectorizer, k\r\n #Change k = 0 for all_in features\r\n if best_model_prop[0] == 'all_in':\r\n best_model_prop[0] = str(0)\r\n pipeline_path = parent_dir.joinpath('default_results', 'pipelines_' + emotion, prev_name + emotion + '_' + best_model_prop[0] + '_' + best_model_prop[1]\r\n + '_' + best_model_prop[2] + '_' + best_model_prop[3] + '.pkl')\r\n print(pipeline_path)\r\n if os.path.exists(pipeline_path):\r\n pipeline = pd.read_pickle(pipeline_path)\r\n emo_pipeline_dict[emotion] = pipeline\r\n else:\r\n # If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\nPlease, train the models and select the best model for the prediction task by running model_selection > Modelling.py')\r\n sys.exit(1)\r\n print(emo_pipeline_dict)\r\n return emo_pipeline_dict",
"def create_pipeline_flow(\n self, cmp_deriv_subject_directory, nipype_deriv_subject_directory\n ):\n acquisition_model = self.stages[\"Diffusion\"].config.diffusion_imaging_model\n recon_tool = self.stages[\"Diffusion\"].config.recon_processing_tool\n\n recon_model = \"DTI\"\n\n if acquisition_model == \"DSI\":\n recon_model = \"SHORE\"\n else:\n if recon_tool == \"Dipy\" and self.stages[\"Diffusion\"].config.dipy_recon_config.local_model:\n recon_model = \"CSD\"\n elif recon_tool == \"MRtrix\" and self.stages[\"Diffusion\"].config.mrtrix_recon_config.local_model:\n recon_model = \"CSD\"\n\n tracking_model = self.stages[\"Diffusion\"].config.diffusion_model\n\n if tracking_model == \"Deterministic\":\n tracking_model = \"DET\"\n elif tracking_model == \"Probabilistic\":\n tracking_model = \"PROB\"\n\n if self.parcellation_scheme == \"Lausanne2018\":\n bids_atlas_label = \"L2018\"\n elif self.parcellation_scheme == \"NativeFreesurfer\":\n bids_atlas_label = \"Desikan\"\n elif self.parcellation_scheme == \"Custom\":\n bids_atlas_label = self.custom_atlas_name\n if self.custom_atlas_res is not None and self.custom_atlas_res != \"\":\n bids_atlas_label += f'_res-{self.custom_atlas_res}'\n\n # Clear previous outputs\n self.clear_stages_outputs()\n\n # Create diffusion workflow with input and output Identityinterface nodes\n diffusion_flow = pe.Workflow(\n name=\"diffusion_pipeline\",\n base_dir=os.path.abspath(nipype_deriv_subject_directory),\n )\n\n diffusion_inputnode = pe.Node(\n interface=util.IdentityInterface(\n fields=[\n \"diffusion\",\n \"bvecs\",\n \"bvals\",\n \"T1\",\n \"aseg\",\n \"aparc_aseg\",\n \"brain\",\n \"T2\",\n \"brain_mask\",\n \"wm_mask_file\",\n \"roi_volumes\",\n \"roi_graphMLs\",\n \"subjects_dir\",\n \"subject_id\",\n \"parcellation_scheme\",\n ]\n ),\n name=\"inputnode\",\n )\n diffusion_inputnode.inputs.parcellation_scheme = self.parcellation_scheme\n diffusion_inputnode.inputs.atlas_info = self.atlas_info\n\n diffusion_outputnode = pe.Node(\n interface=util.IdentityInterface(fields=[\"connectivity_matrices\"]),\n name=\"outputnode\",\n )\n\n diffusion_flow.add_nodes([diffusion_inputnode, diffusion_outputnode])\n\n # Data import\n datasource = self.create_datagrabber_node(\n base_directory=cmp_deriv_subject_directory,\n bids_atlas_label=bids_atlas_label\n )\n\n # Data sinker for output\n sinker = self.create_datasinker_node(\n base_directory=cmp_deriv_subject_directory,\n bids_atlas_label=bids_atlas_label,\n recon_model=recon_model,\n tracking_model=tracking_model\n )\n\n # fmt:off\n diffusion_flow.connect(\n [\n (datasource, diffusion_inputnode, [(\"diffusion\", \"diffusion\"),\n (\"bvecs\", \"bvecs\"),\n (\"bvals\", \"bvals\"),\n (\"T1\", \"T1\"),\n (\"aseg\", \"aseg\"),\n (\"aparc_aseg\", \"aparc_aseg\"),\n (\"brain\", \"brain\"),\n (\"brain_mask\", \"brain_mask\"),\n (\"wm_mask_file\", \"wm_mask_file\")]),\n ]\n )\n # fmt:on\n\n merge_roi_volumes = pe.Node(interface=Merge(5), name=\"merge_roi_volumes\")\n merge_roi_graphmls = pe.Node(interface=Merge(5), name=\"merge_roi_graphmls\")\n\n def remove_non_existing_scales(roi_volumes):\n \"\"\"Returns a list which do not contained any empty element.\n\n Parameters\n ----------\n roi_volumes : list\n A list of output parcellations that might contain empty element\n in the case of the monoscale Desikan scheme for instance\n\n Returns\n -------\n out_roi_volumes : list\n The list with no empty element\n \"\"\"\n out_roi_volumes = []\n for vol in roi_volumes:\n if vol is not None:\n out_roi_volumes.append(vol)\n return 
out_roi_volumes\n\n # fmt:off\n diffusion_flow.connect(\n [\n (datasource, merge_roi_volumes, [(\"roi_volume_s1\", \"in1\"),\n (\"roi_volume_s2\", \"in2\"),\n (\"roi_volume_s3\", \"in3\"),\n (\"roi_volume_s4\", \"in4\"),\n (\"roi_volume_s5\", \"in5\")]),\n (datasource, merge_roi_graphmls, [(\"roi_graphml_s1\", \"in1\"),\n (\"roi_graphml_s2\", \"in2\"),\n (\"roi_graphml_s3\", \"in3\"),\n (\"roi_graphml_s4\", \"in4\"),\n (\"roi_graphml_s5\", \"in5\")]),\n (merge_roi_volumes, diffusion_inputnode, [((\"out\", remove_non_existing_scales), \"roi_volumes\")],),\n (merge_roi_graphmls, diffusion_inputnode, [((\"out\", remove_non_existing_scales), \"roi_graphMLs\")],),\n ]\n )\n # fmt:on\n\n if self.stages[\"Preprocessing\"].enabled:\n preproc_flow = self.create_stage_flow(\"Preprocessing\")\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, preproc_flow, [(\"diffusion\", \"inputnode.diffusion\"),\n (\"brain\", \"inputnode.brain\"),\n (\"aseg\", \"inputnode.aseg\"),\n (\"aparc_aseg\", \"inputnode.aparc_aseg\"),\n (\"brain_mask\", \"inputnode.brain_mask\"),\n (\"wm_mask_file\", \"inputnode.wm_mask_file\"),\n (\"roi_volumes\", \"inputnode.roi_volumes\"),\n (\"bvecs\", \"inputnode.bvecs\"),\n (\"bvals\", \"inputnode.bvals\"),\n (\"T1\", \"inputnode.T1\")]),\n ]\n )\n # fmt:on\n\n if self.stages[\"Registration\"].enabled:\n reg_flow = self.create_stage_flow(\"Registration\")\n # fmt:off\n diffusion_flow.connect(\n [\n # (diffusion_inputnode,reg_flow,[('T2','inputnode.T2')]),\n (preproc_flow, reg_flow, [(\"outputnode.T1\", \"inputnode.T1\"),\n (\"outputnode.act_5TT\", \"inputnode.act_5TT\"),\n (\"outputnode.gmwmi\", \"inputnode.gmwmi\"),\n (\"outputnode.bvecs_rot\", \"inputnode.bvecs\"),\n (\"outputnode.bvals\", \"inputnode.bvals\"),\n (\"outputnode.wm_mask_file\", \"inputnode.wm_mask\"),\n (\"outputnode.partial_volume_files\", \"inputnode.partial_volume_files\",),\n (\"outputnode.roi_volumes\", \"inputnode.roi_volumes\"),\n (\"outputnode.brain\", \"inputnode.brain\"),\n (\"outputnode.brain_mask\", \"inputnode.brain_mask\"),\n (\"outputnode.brain_mask_full\", \"inputnode.brain_mask_full\"),\n (\"outputnode.diffusion_preproc\", \"inputnode.target\"),\n (\"outputnode.dwi_brain_mask\", \"inputnode.target_mask\")]),\n (preproc_flow, sinker, [(\"outputnode.bvecs_rot\", \"dwi.@bvecs_rot\"),\n (\"outputnode.diffusion_preproc\", \"dwi.@diffusion_preproc\"),\n (\"outputnode.dwi_brain_mask\", \"dwi.@diffusion_brainmask\")]),\n ]\n )\n # fmt:on\n if self.stages[\"Registration\"].config.registration_mode == \"BBregister (FS)\":\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, reg_flow, [(\"subjects_dir\", \"inputnode.subjects_dir\"), (\"subject_id\", \"inputnode.subject_id\")]),\n ]\n )\n # fmt:on\n\n if self.stages[\"Diffusion\"].enabled:\n diff_flow = self.create_stage_flow(\"Diffusion\")\n # fmt:off\n diffusion_flow.connect(\n [\n (preproc_flow, diff_flow, [(\"outputnode.diffusion_preproc\", \"inputnode.diffusion\")]),\n (reg_flow, diff_flow, [(\"outputnode.wm_mask_registered_crop\", \"inputnode.wm_mask_registered\",),\n (\"outputnode.brain_mask_registered_crop\", \"inputnode.brain_mask_registered\",),\n (\"outputnode.partial_volumes_registered_crop\", \"inputnode.partial_volumes\",),\n (\"outputnode.roi_volumes_registered_crop\", \"inputnode.roi_volumes\",),\n (\"outputnode.act_5tt_registered_crop\", \"inputnode.act_5tt_registered\",),\n (\"outputnode.gmwmi_registered_crop\", \"inputnode.gmwmi_registered\",),\n (\"outputnode.grad\", \"inputnode.grad\"),\n (\"outputnode.bvals\", 
\"inputnode.bvals\"),\n (\"outputnode.bvecs\", \"inputnode.bvecs\")]),\n (reg_flow, sinker, [(\"outputnode.target_epicorrected\", \"dwi.@bdiffusion_reg_crop\",),\n (\"outputnode.grad\", \"dwi.@diffusion_grad\"),\n (\"outputnode.affine_transform\", \"xfm.@affine_transform\"),\n (\"outputnode.warp_field\", \"xfm.@warp_field\"),\n (\"outputnode.T1_registered_crop\", \"anat.@T1_reg_crop\"),\n (\"outputnode.act_5tt_registered_crop\", \"anat.@act_5tt_reg_crop\",),\n (\"outputnode.gmwmi_registered_crop\", \"anat.@gmwmi_reg_crop\"),\n (\"outputnode.brain_registered_crop\", \"anat.@brain_reg_crop\"),\n (\"outputnode.brain_mask_registered_crop\", \"anat.@brain_mask_reg_crop\",),\n (\"outputnode.wm_mask_registered_crop\", \"anat.@wm_mask_reg_crop\",),\n (\"outputnode.roi_volumes_registered_crop\", \"anat.@roivs_reg_crop\",),\n (\"outputnode.partial_volumes_registered_crop\", \"anat.@pves_reg_crop\",)],),\n ]\n )\n # fmt:on\n\n if self.stages[\"Connectome\"].enabled:\n self.stages[\"Connectome\"].config.probtrackx = False\n self.stages[\"Connectome\"].config.subject = self.global_conf.subject\n con_flow = self.create_stage_flow(\"Connectome\")\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, con_flow, [(\"parcellation_scheme\", \"inputnode.parcellation_scheme\"),\n (\"atlas_info\", \"inputnode.atlas_info\"),\n (\"roi_graphMLs\", \"inputnode.roi_graphMLs\")]),\n (diff_flow, con_flow, [(\"outputnode.track_file\", \"inputnode.track_file\"),\n (\"outputnode.FA\", \"inputnode.FA\"),\n (\"outputnode.ADC\", \"inputnode.ADC\"),\n (\"outputnode.AD\", \"inputnode.AD\"),\n (\"outputnode.RD\", \"inputnode.RD\"),\n (\"outputnode.roi_volumes\", \"inputnode.roi_volumes_registered\",),\n (\"outputnode.skewness\", \"inputnode.skewness\"),\n (\"outputnode.kurtosis\", \"inputnode.kurtosis\"),\n (\"outputnode.P0\", \"inputnode.P0\"),\n (\"outputnode.mapmri_maps\", \"inputnode.mapmri_maps\"),\n (\"outputnode.shore_maps\", \"inputnode.shore_maps\")]),\n (con_flow, diffusion_outputnode, [(\"outputnode.connectivity_matrices\", \"connectivity_matrices\")]),\n (diff_flow, sinker, [(\"outputnode.fod_file\", \"dwi.@fod_file\"),\n (\"outputnode.FA\", \"dwi.@FA\"),\n (\"outputnode.ADC\", \"dwi.@ADC\"),\n (\"outputnode.AD\", \"dwi.@AD\"),\n (\"outputnode.RD\", \"dwi.@RD\"),\n (\"outputnode.skewness\", \"dwi.@skewness\"),\n (\"outputnode.kurtosis\", \"dwi.@kurtosis\"),\n (\"outputnode.P0\", \"dwi.@P0\"),\n (\"outputnode.mapmri_maps\", \"dwi.@mapmri_maps\"),\n (\"outputnode.shore_maps\", \"dwi.@shore_maps\")]),\n (con_flow, sinker, [(\"outputnode.streamline_final_file\", \"dwi.@streamline_final_file\"),\n (\"outputnode.connectivity_matrices\", \"dwi.@connectivity_matrices\")]),\n ]\n )\n # fmt:on\n\n return diffusion_flow",
"def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline",
"def fill_pipeline():\n\n # m1_pca = PCA()\n m1_pca = PCA(svd_solver='randomized', whiten=True) # 与官网里子一致的后2个参数,否则分数很差\n # m1_pca.fit(X_train)\n\n m2_svc = SVC(kernel='rbf', class_weight='balanced')\n\n pipe = Pipeline(steps=[('pca', m1_pca),\n ('svc', m2_svc)])\n print('\\n===================原 estimator')\n pprint(pipe.named_steps)\n return pipe",
"def create_pipeline_for_kfold(self, args):\n return ClassificationPipeline(args=args)",
"def make_pipeline(model):\n\n steps = [\n (\"imp\", SimpleImputer(strategy=\"most_frequent\")),\n (\"norm\", MinMaxScaler()),\n (\"reg\", model)\n ]\n pipeline = Pipeline(steps=steps)\n\n return pipeline",
"def test_generate_pipeline_code():\n pipeline = ['KNeighborsClassifier',\n ['CombineDFs',\n ['GradientBoostingClassifier',\n 'input_matrix',\n 38.0,\n 0.87],\n ['GaussianNB',\n ['ZeroCount',\n 'input_matrix']]],\n 18,\n 33]\n\n expected_code = \"\"\"make_pipeline(\n make_union(\n make_union(VotingClassifier([('branch',\n GradientBoostingClassifier(learning_rate=1.0, max_features=1.0, n_estimators=500)\n )]), FunctionTransformer(lambda X: X)),\n make_union(VotingClassifier([('branch',\n make_pipeline(\n ZeroCount(),\n GaussianNB()\n )\n )]), FunctionTransformer(lambda X: X))\n ),\n KNeighborsClassifier(n_neighbors=5, weights=\"distance\")\n)\"\"\"\n\n assert expected_code == generate_pipeline_code(pipeline)",
"def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])",
"def run(self):\n args = self._parse_args(self._argv)\n with open(args['yaml']) as yaml_file:\n yaml_dict = yaml.safe_load(yaml_file) # returns list<dict>\n yaml_dict = yaml_dict[0]['machine_learning_setup']\n data = DataIngest(yaml_dict['data']).get()\n return PipelineWrapper(yaml_dict['pipeline']).fit_transform(data)",
"def set_pipeline(self):\n dist_pipe = Pipeline([\n ('dist_trans', DistanceTransformer()),\n ('stdscaler', StandardScaler())\n ])\n\n time_pipe = Pipeline([\n ('time_enc', TimeFeaturesEncoder('pickup_datetime')),\n ('ohe', OneHotEncoder(handle_unknown='ignore'))\n ])\n\n preproc_pipe = ColumnTransformer([\n ('distance', dist_pipe, [\"pickup_latitude\", \"pickup_longitude\", 'dropoff_latitude', 'dropoff_longitude']),\n ('time', time_pipe, ['pickup_datetime'])\n ], remainder=\"drop\")\n\n pipe = Pipeline([\n ('preproc', preproc_pipe),\n ('linear_model', LinearRegression())\n ])\n return pipe",
"def create_model_and_dataflow_for_inference(builder, conf, inputs):\r\n\r\n conformer_encoder = conformer_builder.ConformerEncoder(builder,\r\n input_dim=conf.mel_bands,\r\n sequence_length=conf.max_spectrogram_length,\r\n encoder_dim=conf.encoder_dim,\r\n attention_heads=conf.attention_heads,\r\n encoder_layers_per_stage=conf.encoder_layers_per_stage,\r\n dropout_rate=conf.dropout_rate,\r\n cnn_module_kernel=conf.kernel_size,\r\n subsampling_factor=conf.subsampling_factor,\r\n dtype=conf.precision)\r\n\r\n conformer_decoder = conformer_builder.ConformerDecoder(builder,\r\n encoder_dim=conf.encoder_dim,\r\n num_symbols=conf.num_symbols,\r\n for_inference=True,\r\n dtype=conf.precision)\r\n\r\n encoder_output = conformer_encoder(inputs[\"mel_spec_input\"])\r\n\r\n with builder.virtualGraph(conf.num_pipeline_stages - 1):\r\n probs_output = conformer_decoder(encoder_output)\r\n\r\n anchor_types_dict = {\r\n probs_output: popart.AnchorReturnType(\"ALL\"),\r\n }\r\n\r\n proto = builder.getModelProto()\r\n dataflow = popart.DataFlow(conf.batches_per_step, anchor_types_dict)\r\n\r\n return proto, probs_output, dataflow",
"def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)",
"def create():\n with torch.set_grad_enabled(False):\n model = torch.hub.load(\n \"pytorch/vision:v0.6.0\", \"vgg11\", pretrained=True).eval()\n\n with_cuda = torch.cuda.is_available()\n if with_cuda:\n model.to(\"cuda\")\n else:\n logging.warn(\"Running on CPU, no CUDA detected.\")\n\n def call(features):\n images = features[\"image\"].numpy()\n # Normalize according to the documentation. Note that the pro-processing\n # will already have the range normalized to [0, 1].\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n images_normalized = (images - mean) / std\n # Reshape from [batch, h, w, c] -> [batch, c, h, w]\n images_normalized_bchw = np.transpose(\n images_normalized, [0, 3, 1, 2]).astype(np.float32).copy()\n with torch.no_grad():\n images_torch = torch.from_numpy(images_normalized_bchw)\n if with_cuda:\n images_torch = images_torch.to(\"cuda\")\n logits = model(images_torch)\n return torch.nn.functional.softmax(logits, dim=-1).cpu().numpy()\n\n preprocess_config = \"resize_small(256)|central_crop(224)|value_range(0,1)\"\n preprocess_fn = pipeline_builder.get_preprocess_fn(\n preprocess_config, remove_tpu_dtypes=False)\n return call, preprocess_fn",
"def train_pipeline(nlp: spacy.language.Language) -> None:\n if TEXTCAT not in nlp.pipe_names:\n textcat = nlp.create_pipe(TEXTCAT, config={\"exclusive_classes\": False})\n nlp.add_pipe(textcat, last=True)\n else:\n textcat = nlp.get_pipe(TEXTCAT)\n\n for category in CATEGORIES:\n textcat.add_label(category.value)\n\n pipe_exceptions = {TEXTCAT, \"trf_wordpiecer\", \"trf_tok2vec\"}\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n with nlp.disable_pipes(*other_pipes): # only train textcat\n all_data = list(get_classification_training_data())\n random.shuffle(all_data)\n\n training_data = all_data[: len(all_data) - 2]\n validation_data = all_data[len(all_data) - 2 :]\n\n optimizer = nlp.begin_training()\n for itn in range(20):\n losses: Dict[str, Any] = {}\n random.shuffle(training_data)\n batches = minibatch(training_data, size=compounding(4.0, 32.0, 1.001))\n\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(texts, annotations, sgd=optimizer, drop=0.2, losses=losses)"
]
| [
"0.6714538",
"0.66688484",
"0.6507657",
"0.64806324",
"0.6373428",
"0.6265111",
"0.623573",
"0.6211704",
"0.61999327",
"0.60997534",
"0.605733",
"0.6053077",
"0.6050286",
"0.6048502",
"0.60207784",
"0.60169214",
"0.6010641",
"0.5995517",
"0.595444",
"0.5943461",
"0.59082496",
"0.5891863",
"0.5838368",
"0.5815324",
"0.57827",
"0.5775881",
"0.57719785",
"0.57642335",
"0.5763879",
"0.5753393"
]
| 0.6966813 | 0 |
Ensure disabled_options values are part of the list of options | def _validate_disabled_options(self, proposal) -> List[str]:
if proposal.value is None or not proposal.value:
return []
        proposal_diff = set(proposal.value).difference(set(self._options_labels))
assert (
not proposal_diff
), f"Invalid passed options for 'disabled_options': {proposal_diff}"
return proposal.value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_disabled_options(self, change) -> None:\n disabled_options = change.new\n self.set_trait(\"_disabled_options_labels\", disabled_options)\n if not self._initializing_traits_:\n if disabled_options:\n if (\n self.grouping\n and self._flat_groupings()[self.index] in disabled_options\n ):\n for index, label in enumerate(self._flat_groupings()):\n if (\n label not in disabled_options\n and label not in self._group_headers\n ):\n self.index = index\n break\n else:\n self.index = None\n elif self._options_labels[self.index] in disabled_options:\n for index, label in enumerate(self._options_labels):\n if label not in disabled_options:\n self.index = index\n break\n else:\n self.index = None\n elif self._options_labels and not self._grouping_labels:\n if self.index == 0:\n self._notify_trait(\"index\", 0, 0)\n else:\n self.index = 0\n else:\n self.index = None",
"def validate_list(self, field: str, valid_options: List[str]):\n val = getattr(self, field)\n if isinstance(val, list):\n for v in val:\n if v not in valid_options:\n raise ConfigError(f'{v} is not a valid option for {field}')\n else:\n if val not in valid_options:\n raise ConfigError(f'{val} is not a valid option for {field}')",
"def can_be_disabled(self) -> bool:\n return True",
"def check_options(self, options):\n return not any(not isinstance(element, str) for element in options)",
"def _validate_options(self):\r\n valid_choices = ('correct', 'partially-correct', 'incorrect')\r\n for option in self.options:\r\n choice = option['choice']\r\n if choice is None:\r\n raise ValueError('Missing required choice attribute.')\r\n elif choice not in valid_choices:\r\n raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(\r\n choice, ', '.join(valid_choices)))",
"def is_disabled(self):\n\n return self.__contains__('disabled')",
"def check_disabled(self):\n return None",
"def test_deprecated(self):\n def new_deprecated():\n return cfg.DeprecatedOpt(uuid.uuid4().hex, group=uuid.uuid4().hex)\n\n opt_names = ['service-type', 'valid-interfaces', 'endpoint-override']\n depr = dict([(n, [new_deprecated()]) for n in opt_names])\n opts = loading.get_adapter_conf_options(deprecated_opts=depr)\n\n for opt in opts:\n if opt.name in opt_names:\n self.assertIn(depr[opt.name][0], opt.deprecated_opts)",
"def test_allProtosDisabledError(self):\n options = Options()\n self.assertRaises(\n UsageError, options.parseOptions, ([\"--no-pop3\", \"--no-smtp\"])\n )",
"def replace_unacceptable_options(options, is_request):\n newopts = []\n if is_request:\n valid = lambda _o: _o.valid_in_request()\n valid_multiple = lambda _o: _o.valid_multiple_in_request()\n else:\n valid = lambda _o: _o.valid_in_response()\n valid_multiple = lambda _o: _o.valid_multiple_in_response()\n last_number = 0\n for opt in sorted_options(options):\n delta = opt.number - last_number\n last_number = opt.number\n if not valid(opt):\n newopts.append(UnrecognizedOption.from_option(opt))\n elif (0 == delta) and not valid_multiple(opt):\n newopts.append(UnrecognizedOption.from_option(opt))\n else:\n newopts.append(opt)\n return newopts",
"def __validate_options__(cls, options):\n pass",
"def raise_for_disabled(self, disabled_tags: Collection[str]):\n tok = self.token()\n if tok.type == TOKEN_TAG and tok.value in disabled_tags:\n raise DisabledTagError(\n f\"{tok.value} usage is not allowed in this context\",\n linenum=tok.linenum,\n )",
"def validateOptions(self):\n SubCommand.validateOptions(self)\n if not re.match('^yes$|^no$', self.options.usedbs):\n raise ConfigurationException(\"--dbs option only accepts the yes and no values (--dbs=yes or --dbs=no)\")\n self.usedbs = 1 if self.options.usedbs == 'yes' else 0\n\n self.outdir = self.options.outdir",
"def is_Disable_allowed(self):\n handler = self.get_command_object(\"Disable\")\n return handler.check_allowed()",
"def set_disabled(self, val):\n self._disabled = val",
"def set_disabled(self, val):\n self._disabled = val",
"def disabled_reason(self) -> Sequence[str]:\n return pulumi.get(self, \"disabled_reason\")",
"def get_disabled(self):\n return self._disabled",
"def get_disabled(self):\n return self._disabled",
"def verify_radio_dropdown_element_is_disabled(driver, locator, value, module, test, pass_message, fail_message):\n wait_for_element_XPATH(driver, locator)\n driver.find_element_by_xpath(locator).click()\n elem = driver.find_element_by_xpath(value)\n is_disabled = elem.get_attribute(\"disabled\")\n try:\n assert is_disabled == 'true'\n except AssertionError:\n log_to_file(''+module+':'+test+':'+fail_message+'', 'FAILED')\n print 'ERROR - ASSERTION EXCEPTION - ' + fail_message\n email_module.error_mail(module, test, fail_message, 'AssertionError')\n else:\n log_to_file(''+module+' Module:'+test+':'+pass_message+'', 'PASSED')\n print pass_message",
"def disabled_deployment_input_names(self):\n return set([\"length\"])",
"def verbs_allowed(self, cls, verbs):\n verbs = {v.upper() for v in verbs}\n verbs.add('OPTIONS')\n assert not set(cls._allowed_methods(cls())).difference(verbs)",
"def disabled(name):\n return not enabled(name)",
"def test_protoDisable(self):\n options = Options()\n options.parseOptions([\"--no-pop3\"])\n self.assertEqual(options._getEndpoints(None, \"pop3\"), [])\n self.assertNotEqual(options._getEndpoints(None, \"smtp\"), [])\n\n options = Options()\n options.parseOptions([\"--no-smtp\"])\n self.assertNotEqual(options._getEndpoints(None, \"pop3\"), [])\n self.assertEqual(options._getEndpoints(None, \"smtp\"), [])",
"def cleanOptions(options):\r\n daemonize = options.pop('daemonize')\r\n _reload = options.pop('reload')\r\n dev = options.pop('dev')\r\n opts = []\r\n store_true = [\r\n '--nocache', '--global_cache', '--traceback', '--quiet', '--loud'\r\n ]\r\n store_false = []\r\n for key, value in options.iteritems():\r\n key = '--' + key\r\n if (key in store_true and value) or (key in store_false and not value):\r\n opts += [key, ]\r\n elif value:\r\n opts += [key, str(value)]\r\n return daemonize, _reload, opts",
"def test_multiple_values_invalid(self):\n self.check_html(\n self.widget(choices=self.beatles),\n \"beatles\",\n [\"J\", \"G\", \"foo\"],\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"J\" selected>John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\" selected>George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )",
"def disabled(self, disabled):\n self._disabled = disabled",
"def EnableOptionValueList(self):\n if self.force_auto_sync:\n self.get('EnableOptionValueList')\n return self._EnableOptionValueList",
"def _validate_options(self, rules, operator_name):\n values = []\n option_values = []\n for argument in rules[operator_name]:\n if isinstance(argument, dict) and argument.get(\"source\") == \"answers\":\n option_values = (\n self.questionnaire_schema.answer_id_to_option_values_map.get(\n argument[\"identifier\"]\n )\n )\n else:\n values = argument if isinstance(argument, list) else [argument]\n\n if values and option_values:\n for value in values:\n # Null values are allowed and will not exist in answer options\n if value and value not in option_values:\n self.add_error(\n self.VALUE_DOESNT_EXIST_IN_ANSWER_OPTIONS,\n value=value,\n answer_options=list(option_values),\n )",
"def is_disabled(self):\n return self._tag == 'disabled'"
]
| [
"0.61743283",
"0.6131734",
"0.61109066",
"0.6102916",
"0.6051576",
"0.59848464",
"0.5836519",
"0.5722256",
"0.5664706",
"0.56453407",
"0.56433475",
"0.56023014",
"0.55838335",
"0.5579797",
"0.55560064",
"0.55560064",
"0.5544571",
"0.55273265",
"0.55273265",
"0.55205566",
"0.54966146",
"0.5487112",
"0.5482703",
"0.54702634",
"0.5457304",
"0.5456315",
"0.5421311",
"0.5384907",
"0.5376635",
"0.53671294"
]
| 0.78630537 | 0 |
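
Note: the `_validate_disabled_options` document above depends on `set.difference` returning the unknown labels (unlike `difference_update`, which mutates in place and returns None). A minimal standalone sketch of that check — function and variable names here are illustrative, not part of the dataset row — is:

from typing import List, Sequence

def validate_disabled_options(
    disabled_options: Sequence[str], option_labels: Sequence[str]
) -> List[str]:
    """Return disabled_options unchanged if every entry is a known option label."""
    if not disabled_options:
        return []
    # .difference() yields the leftover entries; .difference_update() would return None.
    unknown = set(disabled_options).difference(option_labels)
    assert not unknown, f"Invalid passed options for 'disabled_options': {unknown}"
    return list(disabled_options)

print(validate_disabled_options(["foo"], ["foo", "bar"]))   # ['foo']
# validate_disabled_options(["baz"], ["foo", "bar"])        # raises AssertionError
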
Put options into desired grouping, updating `options` | def _set_grouping(self, change) -> None:
grouping = self._grouping_full
self.options = self._flat_groupings(grouping)
self.set_trait(
"_grouping_labels",
tuple(
[
(header, tuple([_[0] for _ in options]))
for header, options in grouping
]
),
)
if not self._initializing_traits_:
for index, option in enumerate(self._flat_groupings()):
if (
option not in self.disabled_options
and option not in self._group_headers
):
if self.index == index:
self._notify_trait("index", index, index)
else:
self.index = index
break
else:
self.index = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_switching_options(self, options):\n\n #preserve old \"non-switching options\"\n # pylint: disable=not-an-iterable\n new_options = [opt for opt in self._rec.options if \"grp:\" not in opt]\n new_options += options\n\n self._rec.set_options(new_options)\n\n if not self._rec.activate(self._engine):\n msg = \"Failed to set switching options to: %s\" % \",\".join(options)\n raise XklWrapperError(msg)",
"def condition_group_options(self):\n if \"no-groups\" in self.options and self.options[\"no-groups\"]:\n self.options[\"groups\"] = []\n if \"exclude-groups\" in self.options:\n del self.options[\"exclude-groups\"]\n\n return\n\n super().condition_group_options()",
"def _create_options(self):\n self._OPTIONS = {}",
"def add_options(self, options):\n self.options = merge_dicts(self.options, options)",
"def register_opts(self, conf):\n config.register_opt_group(conf, project_config.service_available_group,\n project_config.ServiceAvailableGroup)\n\n config.register_opt_group(conf, project_config.placement_group,\n project_config.PlacementGroup)\n\n config.register_opt_group(conf, project_config.valet_group,\n project_config.opt_valet)",
"def optgroups(self, name, value, attrs=None):\n options = []\n\n for index, (name, product_data) in enumerate(self.product_fields.items()):\n quantity = product_data['quantity']\n name = product_data['name']\n price = product_data['price']\n if index:\n label = 'product_{}'.format(str(index))\n else:\n label = 'product'\n\n options.append({\n 'value': quantity,\n 'price': price,\n 'name': 'products',\n 'label': name,\n 'type': self.input_type,\n 'template_name': self.option_template_name,\n 'wrap_label': True,\n 'index': index\n })\n\n return options",
"def printOptions():\n\n # For each group, create a group option\n print(\"default\")",
"def settings_group_options():\n return [('', _('No group')), *[(str(a.id), str(a)) for a in Group.objects.all()]]",
"def set_options(self, options):\n self.options = options",
"def register_opts(self, opts, group=None):\n for opt in opts:\n self.register_opt(opt, group, clear_cache=False)",
"def create_group(self, label):\n group = OptionGroup(label)\n self.append(group)\n return group",
"def add_to_OptionParser(parser): \n\n parser.set_defaults(input_plugin=None,\n output_plugin=None)\n \n for plugin in registry:\n #print \"add to option parse\", plugin.id\n group = optparse.OptionGroup(parser,\n \"For %s plugin (modify settings BEFORE calling plugin)\" % (plugin.id))\n\n plugin.add_to_OptionParser(parser, group)\n\n for option in getattr(plugin, 'options', ()):\n option.add_to_OptionParser(plugin, parser, group)\n parser.add_option_group(group)",
"def initialize_options(self):",
"def addOptionToGroup(self, groupName, *args, **kwargs):\n group = self._optionGroupDict.get(groupName)\n group.add_argument(*args, **kwargs)",
"def add_args_to_group(cls, group: \"ArgumentGroup\") -> None:\n # group.description = 'For `Architect`, you can supply...'\n # group.add_argument('--server-option', help='Lets you customize')\n return",
"def createOptionsGroup(self):\n self.groupBox = QGroupBox(self.model.get_title())\n self.groupBox.setAlignment(4)\n\n self.load_button = QtGui.QPushButton()\n self.close_button = QtGui.QPushButton()\n\n self.l1 = QLabel(\"Channel: \" + str(self.model.get_channel()))\n self.spin_box = QSpinBox()\n self.spin_box.setMinimumHeight(22)\n\n vbox = QVBoxLayout()\n vbox.addWidget(self.l1)\n vbox.addWidget(self.spin_box)\n vbox.addWidget(self.load_button)\n vbox.addWidget(self.close_button)\n\n self.load_button.setText(\"Load...\")\n self.close_button.setText(\"Close\")\n\n #USE EEG DISPLAY CONTROLLER TO HAVE THE Models LOAD ITS DATA\n loader = DataController(self.model)\n self.load_button.clicked.connect(loader)\n loader.title.connect(self.set_title)\n\n #LET THE MODEL COMMUNICATE IT'S DEAD\n self.close_button.clicked.connect(self.delete)\n\n #Use spin box to switch through channels\n self.spin_box.valueChanged.connect(self.model.set_channel)\n self.spin_box.valueChanged.connect(self.set_channel)\n\n vbox.addStretch(1)\n self.groupBox.setLayout(vbox)\n\n return self.groupBox",
"def add(self, **kwargs):\n group = []\n for name in self.group_by:\n group.append(kwargs.pop(name))\n group = tuple(group)\n if group not in self.data:\n self.data[group] = OrderedDict(self.defaults.copy())\n\n for key in set(kwargs) - set(self.group_by):\n self.data[group][key] = self.data[group].get(key, 0) + kwargs.get(key, 0)",
"def _calculate_options(self, options, option_overrides):\n _options = {}\n _options.update(WidgetSettings.OPTIONS)\n _options.update(options if isinstance(options, dict) else {})\n if 'dateFormat' in _options and 'altFormat' not in _options:\n _options['altFormat'] = _options.pop('dateFormat')\n _options.update(option_overrides)\n self.options = _options",
"def options_argument_group(parser):\n group = parser.add_argument_group(\n \"GLOBAL OPTIONS\",\n \"Options are available for all\" \"arguments within the scope of this command.\",\n )\n\n group.add_argument(\n \"--controller\",\n dest=\"controller\",\n help=\"Use this flag to select the corresponding controller \"\n \"using either the slot number or index.\\nexample: --controller=Slot 0 OR \"\n \"--controller=1\",\n default=None,\n )",
"def _make_opt_list(opts, group):\n import copy\n import itertools\n\n _opts = [(group, list(itertools.chain(*opts)))]\n return [(g, copy.deepcopy(o)) for g, o in _opts]",
"def set_group_selector(*args):\n return _ida_segment.set_group_selector(*args)",
"def _set_disabled_options(self, change) -> None:\n disabled_options = change.new\n self.set_trait(\"_disabled_options_labels\", disabled_options)\n if not self._initializing_traits_:\n if disabled_options:\n if (\n self.grouping\n and self._flat_groupings()[self.index] in disabled_options\n ):\n for index, label in enumerate(self._flat_groupings()):\n if (\n label not in disabled_options\n and label not in self._group_headers\n ):\n self.index = index\n break\n else:\n self.index = None\n elif self._options_labels[self.index] in disabled_options:\n for index, label in enumerate(self._options_labels):\n if label not in disabled_options:\n self.index = index\n break\n else:\n self.index = None\n elif self._options_labels and not self._grouping_labels:\n if self.index == 0:\n self._notify_trait(\"index\", 0, 0)\n else:\n self.index = 0\n else:\n self.index = None",
"def finalize_options(self):",
"def finalize_options(self):",
"def with_group(self, group):\n\t\tself.variables['group'] = group\n\t\treturn self",
"def update(self, options, section=None):\n if section is None:\n section = self.default_sec\n if not self.has_section(section):\n self.add_section(section)\n # change kwargs to be like any other options\n kw = options.pop(\"kwargs\", None)\n if isinstance(kw, dict):\n options.update(kw)\n for k, v in sorted(options.items()):\n self.remove_option(self.default_sec, k)\n self.set(section, k, str(v))",
"def set_options(*args, **kwargs):\n for option in kwargs:\n if option not in BasePlan.options:\n raise BadOption('%s is not a valid, must be a combination '\n 'of %s' % (option, ','.join(BasePlan.options.keys(),)))\n BasePlan.options.update(kwargs)",
"def set_option(self, option, value):\n for option_dict in (\"_general_options\", \"_specific_options\"):\n option_dict = getattr(self, option_dict)\n if option in option_dict:\n if option_dict[option][\"divider\"] != 1:\n value /= float(option_dict[option][\"divider\"])\n setattr(self, \"_\" + option, value)\n break",
"def set_options(self, options):\n self.options = options",
"def set_options(self, options):\n self._set_steps(options.get('bounds', [(0,1)]), options.get('steps',2))"
]
| [
"0.67893493",
"0.62553436",
"0.5973312",
"0.5946463",
"0.5807036",
"0.5781266",
"0.5774601",
"0.5742214",
"0.56995493",
"0.56685764",
"0.56570464",
"0.5636705",
"0.56341064",
"0.5572815",
"0.5570275",
"0.5524016",
"0.5508672",
"0.54946196",
"0.5492006",
"0.5484445",
"0.54833764",
"0.54797214",
"0.54581344",
"0.54581344",
"0.5456417",
"0.5422949",
"0.54161257",
"0.5377751",
"0.5353147",
"0.5351379"
]
| 0.6826257 | 0 |
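
Note: the `_set_grouping` document above assumes a `_flat_groupings` helper that turns `(header, ((label, value), ...))` groups into one flat option list, with headers kept as placeholder entries. A self-contained sketch of that flattening (names and types assumed for illustration, not taken from the dataset row) is:

from typing import Any, List, Tuple

Grouping = Tuple[Tuple[str, Tuple[Tuple[str, Any], ...]], ...]

def flat_groupings(grouping: Grouping) -> List[Tuple[str, Any]]:
    """Flatten grouped options into a single list; headers become (header, None)."""
    flat: List[Tuple[str, Any]] = []
    for header, options in grouping:
        if header:
            flat.append((header, None))
        flat.extend(options)
    return flat

grouping = (
    ("Fruit", (("Apple", 1), ("Pear", 2))),
    ("Veg", (("Kale", 3),)),
)
print(flat_groupings(grouping))
# [('Fruit', None), ('Apple', 1), ('Pear', 2), ('Veg', None), ('Kale', 3)]
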
Get group headers from self._grouping_labels | def _group_headers(self) -> List[str]:
return [_[0] for _ in self._grouping_labels] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_group_names(self):\n return [self.frame.columns[i] for i in self.group_cols]",
"def get_group_names(self):\r\n return self.groups.keys()",
"def _flat_groupings(\n self, grouping: Tuple[Tuple[str, Tuple[Tuple[str, Any]]]] = None\n ) -> List[Tuple[str, Any]]:\n grouping = grouping if grouping is not None else self._grouping_labels\n\n res = []\n for header, options in grouping:\n if header:\n res.append((header, None))\n res.extend(options)\n return res",
"def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups",
"def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))",
"def headers(self):\n fd = open(self.filename, \"r\")\n file_head = fd.readlines()\n fd.close()\n\n zip_heads = zip(file_head[0].split(self.separator),\n file_head[1].split(self.separator))\n\n metric_names = []\n category = \"\"\n for _category, metric in zip_heads:\n # fill empty category names\n if len(_category) is not 0:\n category = _category\n\n metric_names.append(\"%s.%s\" % (category, metric))\n\n return metric_names[:-1]",
"def get_group_names(self, include = ['*'], exclude = []):\n \n raise NotImplementedError('get_group_names')",
"def get_group_label(group):\n indices = [a.index for a in group.atoms]\n names = [a.name for a in group.atoms]\n label = []\n for i in range(len(indices)):\n label.append('%d/%s' % (indices[i], names[i]))\n return(' '.join(label))",
"def getMeasHeaders(self):\n headers = []\n for ii in range(self.rows):\n inst = self.instruments[self.stringInsts.index(self.selInsts[ii])]\n param = inst.getParam(self.selParams[ii])\n if type(param.comps) is not list:\n if param.type == 'cont':\n headers.append(sc.formatHeader(inst, param, param.units))\n else:\n headers.append(sc.formatHeader(inst, param))\n else:\n for ii,comp in enumerate(param.comps):\n if param.type == 'cont':\n headers.append(sc.formatHeader(inst, comp, param.units[ii]))\n else:\n headers.append(sc.formatHeader(inst, comp))\n return headers",
"def get_headers_labels(self) -> List[str]:\n header_labels = []\n first = True\n\n for parameter in self.parameters:\n unit_option = ProfileParamUnitOption.get_unit_option(\n self.request.user.profile, parameter\n )\n param_labels = [\n f\"{a.title().rstrip('s')} ({unit_option.symbol})\"\n if ind != 0 else a.title().rstrip('s')\n for ind, a in enumerate(\n parameter.upload_field_labels.split(', '))\n ]\n if not first:\n param_labels.insert(0, '')\n first = False\n header_labels.extend(param_labels)\n\n return header_labels",
"def groups(self):\n return self.get_data(\"groups\")",
"def data_grouping(self):\n group_container, film_container, plank_container = [[] for a in range(self.tot_conditions)], \\\n [[] for a in range(self.tot_conditions)], \\\n [[] for a in range(self.tot_conditions)]\n\n for i in self.data_labels:\n group = int(i[:-1])\n group_container[group - 1].append(i)\n film_container[group - 1].append(self.film_count[self.data_labels.index(i)])\n plank_container[group - 1].append(self.plank_count[self.data_labels.index(i)])\n\n return group_container, film_container, plank_container",
"def construct_header(self): \n \n # create the individual labels\n hdr_bits = [hb.format(hdr) for hb, hdr in zip(self.row_base, self.headers)]\n \n # stick it all together and return with hdr_sep underneath\n hdr_str = f\"|{'|'.join(hdr_bits)}|\\n\"\n return hdr_str + self.hdr_sep * (len(hdr_str)-1) + \"\\n\"",
"def test_api_v1_groups_names_get(self):\n pass",
"def get_export_header(self):\n\n name = self.get_name()\n\n if (self.name == \"input::nodes\"):\n\n name = \"user-specified\"\n\n grp_string = self.get_grp_string()\n\n if grp_string != \"\":\n\n grp_string = \" \" + grp_string\n\n return \"\\n!*!Label \" + self.path[1] + \" ..\" + grp_string + \" .. \" + name + \"\\n\"",
"def get_headers(self):\n headers = []\n for text, level in self._headers:\n headers.append(text)\n return headers",
"def get_headings(self):\n return self.headings",
"def get_country_groups_grid_column_names_by_order(self):\n self.column_name_list = self.get_grid_column_names_by_order(self.country_groups_grid_div_id)\n return self.column_name_list",
"def get_nested_groups_names(group):\n return (\n criterion.findtext(\"value\")\n for criterion in group.findall(\"criteria/criterion\") if\n criterion.findtext(\"name\") in (\"Computer Group\", \"Mobile Device Group\")\n and criterion.findtext(\"search_type\") == \"member of\")",
"def groups(self):\n groups_text = '\\n'\n for group in self.exercise_numbers:\n txt = ' %s:\\t' % group[0]\n for exercise in group[1:]:\n if isinstance(exercise, int):\n txt += '%d. ' % exercise\n else:\n txt += '\\n\\t%s\\n\\t' % exercise\n groups_text += txt + '\\n'\n return groups_text",
"def get_grouped_data(self, field_name):\n pass",
"def _make_grouping(\n grouping: List[Tuple[str, List[str]]]\n) -> Tuple[Tuple[str, Tuple[Tuple[str, Any]]]]:\n return tuple([(header, _make_options(options)) for header, options in grouping])",
"def header(self, format=None):\n return [\" ID \",\n \"East\",\n \"North\",\n \"TARGET ELEV\",\n \" LENGTH\",\n \" AZ\",\n \" DIP\",\n \"PLAN ELEV\"]",
"def combined_headers(self):\n return vstack([ds.meta[\"headers\"] for ds in self._data.flat])",
"def getGroupName(Id):\r\n return \"Group name\"",
"def group(self):\n return self.tr(self.groupId())",
"def group(self):\n return self.tr(self.groupId())",
"def group(self):\n return self.tr(self.groupId())",
"def group(self):\n return self.tr(self.groupId())",
"def _horizontal_header(self):\n return self.header()"
]
| [
"0.6871165",
"0.6661643",
"0.62353957",
"0.6033604",
"0.5959639",
"0.59267735",
"0.57593286",
"0.5736832",
"0.57264966",
"0.57093465",
"0.5673237",
"0.56410503",
"0.56166875",
"0.5601591",
"0.5574714",
"0.5573216",
"0.5543794",
"0.54342717",
"0.5420204",
"0.53577757",
"0.5335758",
"0.5335329",
"0.5334559",
"0.5325987",
"0.5313654",
"0.53134835",
"0.53134835",
"0.53134835",
"0.53134835",
"0.53071004"
]
| 0.8808598 | 0 |
Return label, value pair of grouping index. The index is expected to match `_flat_groupings`, i.e., the actual dropdown. | def _get_grouping_label_value(
self,
index: int,
grouping: Tuple[Tuple[str, Tuple[Tuple[str, Any]]]] = None,
) -> Tuple[str, Any]:
grouping = grouping if grouping is not None else self._grouping_full
res = self._flat_groupings(grouping)[index]
if not isinstance(res, tuple) or len(res) != 2:
raise ValueError(
f"Found a grouped value that is not a tuple with length 2: {res!r}"
)
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_group_index(self, index):\n\n g_index = 0\n for group in self.groups:\n if group[0] == index:\n g_index = group[1]\n break\n return g_index",
"def _get_group_attributes(self, index):\n\n g_case = (None, None, -1)\n for group in self.group_slots:\n if group[0] == index:\n g_case = group[1]\n break\n return g_case",
"def __getitem__(self, index):\n return self.group_list[index]",
"def get_val_tuple(self, value):\n group_id = ([(idx1, idx2)\n for idx1, category in enumerate(self.groups)\n for idx2, val in enumerate(category) if val == value])\n return group_id[0]",
"def _getGroupIndex(self):\n return copy.deepcopy(self._group_index)",
"def _getGroupIndex(self):\n return copy.deepcopy(self._group_index)",
"def get_value_label(self, value):\n return self.label_config.get_index_label(value)",
"def get_group_label(group):\n indices = [a.index for a in group.atoms]\n names = [a.name for a in group.atoms]\n label = []\n for i in range(len(indices)):\n label.append('%d/%s' % (indices[i], names[i]))\n return(' '.join(label))",
"def map_to_grouping(value: int, grouping: List[List[int]]) -> int:\n for i in range(len(grouping)):\n if value in grouping[i]:\n return i\n\n raise ValueError",
"def label_index(self):\n return self._label_index",
"def label_index(self):\n return self._label_index",
"def getGroup(self, index):\n index = int(index)\n if index < 0:\n return self.top_group1\n elif index > (self.layers - 1):\n index = (self.layers - 1)\n return self.groups[index]",
"def label_from_index(self, index):\n raise NotImplementedError",
"def __getitem__(self, index):\n group = self.groups[index]\n return self.get_x_y(group)",
"def get_group_label(i):\n if i//4 == 0:\n return \"buildUpPlay\"\n elif i//4 == 1:\n return \"chanceCreation\"\n elif i//4 == 2:\n return \"defence\"",
"def get_step_label_at_index(self, index):\n return self[index][1]",
"def group_indexes(self) -> Optional[List[float]]:\n return pulumi.get(self, \"group_indexes\")",
"def group_indexes(self) -> Optional[List[float]]:\n return pulumi.get(self, \"group_indexes\")",
"def group_indexes(self) -> Optional[List[float]]:\n return pulumi.get(self, \"group_indexes\")",
"def get_index_for_group(groups, which_group):\n group_indexes = np.where(groups == float(which_group))[0]\n return group_indexes",
"def __getitem__(self, idx):\n g = self.graph_lists[idx]\n return g, self.graph_labels[idx]",
"def __getitem__(self, idx):\n g = self.graph_lists[idx]\n return g, self.graph_labels[idx]",
"def get_index_2_label(self, index):\n return self._index_2_labels.get(index, self._unknown_label)",
"def __getitem__(self, label_value: int) -> 'SegmentInfo':\n return self.infos[label_value]",
"def group_by(data, index):\n sorted_data = sorted(data, key=lambda x: x[index])\n groupby_data = groupby(sorted_data, lambda x: x[index])\n return groupby_data",
"def get_value_at_index(self, index, cc):\n tl = cc.dsget(self.title)\n return (tl[index], None)",
"def get_grouping_key(self, invoice_tax_val):\n self.ensure_one()\n return str(invoice_tax_val['tax_id']) + '-' + \\\n str(invoice_tax_val['account_id']) + '-' + \\\n str(invoice_tax_val['account_analytic_id'])",
"def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))",
"def return_index(self, idx):\n return (\n self.timeseries[idx],\n self.ch_amount,\n self.freq[idx],\n self.ch_name[idx],\n self.units[idx],\n )",
"def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n return self.labels[index]"
]
| [
"0.651733",
"0.6260821",
"0.60948837",
"0.60557127",
"0.59572226",
"0.59572226",
"0.57136685",
"0.5596377",
"0.55802846",
"0.5566664",
"0.5566664",
"0.5500841",
"0.5449907",
"0.54491603",
"0.54484546",
"0.54018337",
"0.52841455",
"0.52841455",
"0.52841455",
"0.5266494",
"0.52638173",
"0.52638173",
"0.5256756",
"0.5244484",
"0.5204546",
"0.51935226",
"0.51058006",
"0.5077631",
"0.5025923",
"0.5023145"
]
| 0.75741416 | 0 |
mirror triples from endpoints according to resource paths specified in res_paths. Each resource path is a tuple consisting of a list of start resources and a list of patterns describing which edges to follow. A very simple example of a resource path may consist of just a start resource, e.g. | def mirror (self, res_paths):
self.start_time = time.time()
self.todo = []
self.done = set()
for res_path in res_paths:
resolved_paths = map(
lambda p:
map(
lambda p: (self.resolve_shortcuts(p[0]), p[1]) if type(p) is tuple else
self.resolve_shortcuts(p), p), res_path[1])
for resource in res_path[0]:
if isinstance(resource, basestring):
rs = [ self.resolve_shortcuts (resource) ]
else:
rs = []
for t in self._fetch_ldf(p=self.resolve_shortcuts(resource[0]),
o=self.resolve_shortcuts(resource[1])):
rs.append(t[0])
for r in rs:
for resolved_path in resolved_paths:
# import pdb; pdb.set_trace()
logging.debug ('adding task: %s %s' % (r, repr(resolved_path)))
self.todo.append((rdflib.URIRef(r), resolved_path))
while len(self.todo)>0:
resource, path = self.todo.pop()
todo_new = set()
# fetch resources from LDF only once
if resource in self.done:
triples = list(self.graph.triples((resource, None, None)))
# logging.debug (u'LDF: DONE, %d triples' % len(triples))
do_add = False
else:
triples = self._fetch_ldf (s=resource)
self.done.add(resource)
do_add = True
# transformations
if len(path)>0:
res_filter = path[0]
if type(res_filter) is tuple:
pred, f = res_filter
for t in triples:
s = t[0]
p = t[1]
o = t[2]
if unicode(p) != pred:
continue
np, no = f(o)
np = self.resolve_shortcuts(np)
if do_add:
triples.append ((s, np, no))
res_filter = unicode(np)
if do_add:
for t in triples:
self.graph.add(t)
if len(path)>0:
new_path = path[1:]
for t in triples:
if len(t)<3:
logging.error('triple of 2?! %s' % repr(t))
continue
s = t[0]
p = t[1]
o = t[2]
if not isinstance(o, rdflib.URIRef):
continue
# logging.debug ('LDF checking %s %s' % (p, o))
if res_filter == '*' or res_filter == unicode(p):
# import pdb; pdb.set_trace()
task = (o, new_path)
# logging.debug ('LDF adding new task: %s' % repr(task))
self.todo.append(task) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def paths_from_src(\n src: str = Query(..., description=\"starting article\"),\n dsts: list[str] = Query(..., description=\"destination articles\"),\n db: Session = Depends(database.get_db),\n):\n paths: dict[str, Optional[ArticlePath]] = {}\n ppd = multi_target_bfs(db, src)\n for dst in dsts:\n dst_id = title_to_id(db, dst)\n path = follow_parent_pointers(dst_id, ppd)\n if path is None:\n paths[dst] = None\n continue\n article_path = []\n for article_id in path:\n article_title = id_to_title(db, article_id)\n article_url = f\"https://en.wikipedia.org/?curid={article_id}\"\n article_path.append(\n ArticleWrapper(\n id=article_id,\n title=article_title,\n link=article_url, # type: ignore\n )\n )\n paths[dst] = ArticlePath(articles=article_path)\n return ManyArticlePaths(paths=paths)",
"def _copy_paths(self, paths, source, destination, output_path,\r\n final_path=None):\r\n for path in paths:\r\n if final_path:\r\n copy(os.path.join(source, path),\r\n os.path.join(output_path, destination, final_path))\r\n else:\r\n copy(os.path.join(source, path),\r\n os.path.join(output_path, destination, path))",
"def _populate_mirrored(table):\n complete = {}\n for key, paths in table.items():\n complete[key] = paths;\n\n # most likely a symbol background\n if not isinstance(key, tuple):\n continue\n\n swapped = (key[1], key[0])\n\n # check if mirrored is defined\n # if yes, skip item, otherwise produce mirrored\n if swapped in table:\n continue\n \n mirrored = []\n for path in paths:\n mirrored.append(mirror_path(path))\n\n complete[swapped] = list(reversed(mirrored))\n \n return complete",
"def cache_links(self):\n for source_location in self.gen_locations():\n for vi, delta in vi_delta_pairs:\n drow, dcol = delta\n for command, magnitude in ((vi, 1), (vi.upper(), 8)):\n target_location = source_location\n for i in range(magnitude):\n trow, tcol = target_location\n next_target_location = (trow + drow, tcol + dcol)\n if self.is_inbounds(next_target_location):\n target_location = next_target_location\n else:\n break\n triple = (source_location, target_location, command)\n self.cached_links.append(triple)",
"def pairing(self):\n if len(self._paths) == 0:\n second_values = self.data\n get_flight = lambda x: x\n first = True\n else:\n second_values = self._paths\n get_flight = lambda x: x.get_last_flight()\n first = False\n\n for value in second_values:\n f1 = get_flight(value)\n for f2 in self.data:\n if f1.connects_to(f2):\n if first:\n self._paths.append(FlightPath(f1, f2))\n else:\n path_copy = copy.copy(value)\n added = path_copy.try_add(f2)\n if added:\n self._paths.append(path_copy)",
"def _route_chunk(data, host_url, annotations='duration', retries=10, extra_params=None):\n\t# offsets are used to make correct indice of the result dataframe\n\tsources, destinations, sources_offset, destinations_offset = data\n\tsources_count = len(sources)\n\tdestinations_count = len(destinations)\n\n\t# OSRM takes all points as one list, and then numbers of sources & dests in it\n\tall_points = sources + destinations\n\tencoded = encode_poly([(p.y, p.x) for p in all_points])\n\n\t# numerate sources & dests. sources come first\n\tsource_numbers = ';'.join(map(str, range(sources_count)))\n\tdestination_numbers = ';'.join(map(str,\n\t\trange(sources_count, sources_count + destinations_count)))\n\n\n\textra_params = extra_params or {}\n\tparams = {\n\t\t'sources': source_numbers,\n\t\t'destinations': destination_numbers,\n\t\t'generate_hints': 'false',\n\t\t'annotations': annotations,\n\t\t**extra_params\n\t}\n\n\tencoded_params = urllib.parse.quote_plus(urllib.parse.urlencode(params))\n\t# if we pass url and params separately to requests.get, it will make a malformed URL\n\tencoded_url = f'{host_url}/table/v1/driving/polyline({encoded})?{encoded_params}'\n\tresp = get_retry(encoded_url, {}, retries)\n\n\tif resp.status_code != 200:\n\t\traise RuntimeError(f'OSRM server responded with {resp.status_code} code. Content: {resp.content}')\n\n\tresp_data = resp.json()\n\tif resp_data.get('code', 'Ok') != 'Ok':\n\t\traise RuntimeError(f'OSRM server responded with error message: {resp_data[\"message\"]}')\n\n\t# if 'duration' is requested, then take resp_data['durations'], or resp_data['distances'] if distances.\n\t# also, 'duration,distance' might be requested, then take both and concatenate results (= join columns)\n\tresults = []\n\t\n\tfor key in annotations.split(','):\n\t\tdf = pd.DataFrame(resp_data[f'{key}s']).reset_index().rename(columns={'index': 'source'}).melt(id_vars='source', var_name='destination', value_name=key)\n\t\tdf[key] = df[key].astype(float)\n\t\tif len(results) > 0:\n\t\t\t# only append the data column\n\t\t\tresults.append(df[[key]])\n\t\telse:\n\t\t\tresults.append(df)\n\n\tresult_df = pd.concat(results, axis=1)\n\n\t# snapping distances\n\tresult_df['source_snap'] = result_df.source.map(pd.DataFrame(resp_data['sources'])['distance'])\n\tresult_df['destination_snap'] = result_df.destination.map(pd.DataFrame(resp_data['destinations'])['distance'])\n\n\t# instead of join/merge lookup\n\tresult_df['geometry'] = result_df['source'].map({i: g for i, g in enumerate(sources)})\n\tresult_df['geometry_dest'] = result_df['destination'].map({i: g for i, g in enumerate(destinations)})\n\n\t# shift back by the given offset\n\tresult_df['destination'] = result_df['destination'].astype(int) + destinations_offset\n\tresult_df['source'] = result_df['source'].astype(int) + sources_offset\n\treturn result_df",
"def naiveGlobalRouting(self):\n for e_list in self.s2e.values():\n for e in e_list:\n slot_path = []\n src_slot = self.v2s[e.src]\n dst_slot = self.v2s[e.dst]\n slot_path.append(src_slot)\n\n curr = src_slot\n len_x = src_slot.getLenX()\n len_y = src_slot.getLenY()\n\n # first go in X direction\n x_diff = curr.getPositionX() - dst_slot.getPositionX()\n if x_diff:\n dir = 'LEFT' if x_diff > 0 else 'RIGHT'\n for i in range(int(abs(x_diff/len_x))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n\n y_diff = curr.getPositionY() - dst_slot.getPositionY()\n if y_diff:\n dir = 'DOWN' if y_diff > 0 else 'UP'\n for i in range(int(abs(y_diff/len_y))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n \n assert curr == dst_slot\n \n slot_path = slot_path[1:-1] # exclude the src and the dst\n logging.info(f'{e.name}: {self.v2s[e.src].getName()} -> {self.v2s[e.dst].getName()} : ' + ' '.join(s.getName() for s in slot_path))\n self.e_name2path[e.name] = slot_path",
"def just_create_paths(graph):\n trip_id = 1\n line_num = 0\n num_trips = 0\n trip_id2model = {}\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n fl2t = p\n id2bad = {}\n while p.next_line != len(graph.lines):\n #if trip_id > 30:\n # return\n print trip_id\n line_num = p.next_line\n trip_id = normalize_simple(graph.lines[line_num])[0]\n p = Path(trip_id,graph,line_num=line_num)\n first,last = p.first_last\n \"\"\"\n simple = graph.is_simple(p.edges[:],first,last)\n if not simple or p.edges.count(1) == 0:\n #print \"%d: (%d,%d)\" % (trip_id,first,last)\n #graph.draw_grid(p.edges)\n id2bad[trip_id] = True\n \"\"\"\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n\n #print len(id2bad.keys())\n #with open('pickles/trip_id2bad-%d-%d.pickle' % (graph.rows,graph.cols),'wb') as output:\n # pickle.dump(id2bad,output)\n with open('psdd/better_pickles/trip_id2model.pickle','wb') as output:\n pickle.dump(trip_id2model,output)",
"def optimizedRoutePossibilities2(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tpath = find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tif couple[0] in graph[path[-1]]:\n\t\t\t\tyield path",
"def test_find_multitable_conflicting_paths(self):\n ruleset_a = [\n Rule(priority=10, table=0,\n match=Match([('VLAN_VID', 1, None)]),\n instructions=Instructions(dup=goto1)),\n Rule(priority=10, table=0,\n match=Match([('VLAN_VID', 2, None)]),\n instructions=Instructions(dup=goto2)),\n Rule(priority=0, table=0),\n Rule(priority=20, table=1,\n match=Match([('IPV4_DST', 0, 0xFFFFFFFE)]),\n instructions=Instructions(dup=output1)),\n Rule(priority=19, table=1,\n match=Match([('IPV4_DST', 0, None)]),\n instructions=Instructions(dup=output1)),\n Rule(priority=0, table=1),\n Rule(priority=30, table=2,\n match=Match([('IPV4_DST', 0, None)]),\n instructions=Instructions()),\n Rule(priority=30, table=2,\n match=Match([('IPV4_DST', 1, None)]),\n instructions=Instructions()),\n Rule(priority=0, table=2)\n ]\n\n ruleset_b = [\n Rule(priority=14, table=0,\n match=Match([('VLAN_VID', 1, None), ('IPV4_DST', 0, None)])),\n Rule(priority=14, table=0,\n match=Match([('VLAN_VID', 1, None), ('IPV4_DST', 1, None)]),\n instructions=Instructions(dup=output1)),\n Rule(priority=14, table=0,\n match=Match([('VLAN_VID', 2, None), ('IPV4_DST', 0, None)]),\n instructions=Instructions(dup=output1)),\n Rule(priority=14, table=0,\n match=Match([('VLAN_VID', 2, None), ('IPV4_DST', 1, None)])),\n Rule(priority=0, table=0)\n ]\n\n single_a = to_single_table(ruleset_a)\n single_b = to_single_table(ruleset_b)\n norm_a = normalise(single_a)\n norm_b = normalise(single_b)\n\n result_ab = {\n (ruleset_a[0], ruleset_a[3]): frozenset([(ruleset_b[0],)]),\n (ruleset_a[1], ruleset_a[6]): frozenset([(ruleset_b[2],)])\n }\n result_ba = {\n (ruleset_b[0],): frozenset([(ruleset_a[0], ruleset_a[3])]),\n (ruleset_b[2],): frozenset([(ruleset_a[1], ruleset_a[6])])\n }\n\n equal_ab, diff_ab = check_equal(norm_a, norm_b, diff=True)\n self.assertFalse(equal_ab)\n equal_ba, diff_ba = check_equal(norm_b, norm_a, diff=True)\n self.assertFalse(equal_ba)\n\n paths_ab = find_conflicting_paths(diff_ab, single_a, single_b)\n paths_ba = find_conflicting_paths(diff_ab, single_b, single_a)\n\n self.assertEqual(paths_ab, result_ab)\n self.assertNotEqual(paths_ab, result_ba) # Sanity\n self.assertEqual(paths_ba, result_ba)",
"def test_find_rewrite_conflicting_paths(self):\n inst_a = Instructions()\n inst_a.goto_table = 1\n inst_a.apply_actions.append(\"SET_FIELD\", (\"VLAN_VID\", 1))\n # Note: Set VLAN applies the present bit mask so must included it\n ruleset_a = [\n Rule(priority=10, table=0,\n match=Match([('VLAN_VID', 0x1000 | 0, None)]),\n instructions=Instructions(dup=inst_a)),\n Rule(priority=0, table=0),\n Rule(priority=20, table=1,\n match=Match([('VLAN_VID', 0x1000 | 0, None)]),\n instructions=Instructions(dup=output1)),\n Rule(priority=20, table=1,\n match=Match([('VLAN_VID', 0x1000 | 1, None)])),\n Rule(priority=0, table=1)\n ]\n ruleset_b = [\n Rule(priority=0, table=0, instructions=Instructions(dup=output1))\n ]\n single_a = to_single_table(ruleset_a)\n single_b = to_single_table(ruleset_b)\n norm_a = normalise(single_a)\n norm_b = normalise(single_b)\n\n # Make sure the frozensets are made after to_single_table which changes\n # priorities which changes the Rule's hash in the frozenset\n result_ab = {\n (ruleset_a[0], ruleset_a[3]): frozenset([(ruleset_b[0],)]),\n (ruleset_a[1],): frozenset([(ruleset_b[0],)])\n }\n result_ba = {\n (ruleset_b[0],): frozenset([(ruleset_a[0], ruleset_a[3]),\n (ruleset_a[1],)])\n }\n\n equal_ab, diff_ab = check_equal(norm_a, norm_b, diff=True)\n self.assertFalse(equal_ab)\n\n paths_ab = find_conflicting_paths(diff_ab, single_a, single_b)\n paths_ba = find_conflicting_paths(diff_ab, single_b, single_a)\n self.assertEqual(paths_ab, result_ab)\n self.assertNotEqual(paths_ab, result_ba) # Sanity check\n self.assertEqual(paths_ba, result_ba)",
"def route(vertices_resources, nets, machine, constraints,\n placements, allocations, algorithm, core_resource):\n if algorithm == \"default\":\n module = \"rig.place_and_route\"\n algorithm = \"default\"\n else:\n module = \"rig.place_and_route.route.{}\".format(algorithm)\n \n try:\n router = getattr(import_module(module), \"route\")\n except (ImportError, AttributeError):\n sys.stderr.write(\n \"Routing algorithm {} does not exist\\n\".format(algorithm))\n sys.exit(1)\n \n logger.info(\"Routing netlist using '{}'...\".format(algorithm))\n \n before = time.time()\n routes = router(vertices_resources, nets, machine, constraints,\n placements, allocations, core_resource)\n after = time.time()\n \n logger.info(\"Routed netlist in {:.2f}s\".format(after - before))\n \n return routes",
"def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)",
"def create_path(self):\n\n partials = []\n partials.append({})\n #print self.trip_id\n\n #this variable is true if we have not yet recorded the first edge of a path\n first_edge = True\n #this variable is false until we hit the midpoint\n hit_midpoint = False\n\n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n good_graphs = []\n good_graphs.append(True)\n nodes_visited = []\n nodes_visited.append([])\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n good_graphs[matrices_index] = False\n else:\n edge_sets[matrices_index][edge_num] = 1\n if edge_num in partials[matrices_index] and partials[matrices_index][edge_num] == 0:\n del partials[matrices_index][edge_num]\n if not hit_midpoint:\n if first_edge:\n above = (prev_coords[0]-1,prev_coords[1])\n below = (prev_coords[0]+1,prev_coords[1])\n left = (prev_coords[0],prev_coords[1]-1)\n right = (prev_coords[0],prev_coords[1]+1)\n for next_coords in (above,below,left,right):\n other_edge = self.graph.edge_num(prev_coords[0],prev_coords[1],next_coords[0],next_coords[1])\n if other_edge != -1:\n partials[matrices_index][other_edge] = 0\n first_edge = False\n if self.graph.coords_to_node(prev_coords[0],prev_coords[1]) == self.midpoint:\n hit_midpoint = True\n partials[matrices_index][edge_num] = 1\n if self.graph.coords_to_node(coords[0],coords[1]) == self.midpoint:\n hit_midpoint = True\n\n\n\n if coords[0] == -1:\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n good_graphs.append(True)\n nodes_visited.append([])\n matrices_index += 1\n partials.append({})\n hit_midpoint = False\n first_edge = True\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n for coords in nodes_visited[best_index]:\n self.graph.node_visit(self.trip_id,coords)\n \n\n if self.trip_id not in 
self.graph.trip_id2line_num:\n #if first_lasts[best_index] == [28,5]:\n # print \"a to b: %d\" % self.trip_id\n self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],good_graphs[best_index],partials[best_index]",
"def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])",
"def set_extra_paths(paths, board):\n new_board = board.copy()\n\n for path in paths:\n new_board[path[\"src\"]] = {}\n new_board[path[\"src\"]][path[\"dst\"]] = distance(path[\"src\"], path[\"dst\"])\n\n return new_board",
"async def path_from_src_to_dst(\n src: str = Query(..., description=\"starting article\"),\n dst: str = Query(..., description=\"destination article\"),\n db: Session = Depends(database.get_db),\n):\n try:\n path = bidi_bfs(db, src, dst)\n except ValueError:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Could not find matching article for at least one of {src} and {dst}\",\n )\n if path is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"No path found between {src} and {dst}\",\n )\n article_path = []\n for article_title in path:\n article_id = title_to_id(db, article_title)\n article_url = f\"https://en.wikipedia.org/?curid={article_id}\"\n article_path.append(\n ArticleWrapper(\n id=article_id,\n title=article_title,\n link=article_url, # type: ignore\n )\n )\n return ArticlePath(articles=article_path)",
"def routes(self) -> List[Tuple[int, bytes]]:\n raise NotImplementedError() # pragma: no cover",
"def update_trip_path(trip_mpois, paths, graph):\n n_nodes = len(trip_mpois)\n # adjacency matrix\n new_paths = np.zeros(shape=(n_nodes, n_nodes))\n\n # iterate through all the nodes and create a list of nodes with sequential id\n for i, node1 in enumerate(trip_mpois):\n for j, node2 in enumerate(trip_mpois):\n new_paths[i, j] = paths[node1, node2]\n\n # new_paths = new_paths/np.max(new_paths[new_paths < _INF])\n # new_paths[np.isinf(new_paths)] = _INF\n\n # create a dummy edge between end and start node with weight 0\n new_paths[1,0] = -_INF\n # new_paths[0,1] = _INF\n\n shortest_path = None\n if n_nodes > 5:\n shortest_path, dist = tsp.solve(n_nodes, new_paths)\n # shortest_path = range(n_nodes)\n else:\n shortest_path = range(n_nodes)\n\n trip_path = np.array(trip_mpois)[shortest_path]\n\n if ___DEBUG:\n fname = 'dump/' + str(n_nodes) + '.dist'\n np.savetxt(fname, new_paths, fmt='%.6f')\n \n mpoi_pos = np.zeros(shape=(n_nodes,2))\n \n for i, node in enumerate(trip_mpois):\n pos_3d = graph.vs[node]['position']\n assert node == graph.vs[node].index\n mpoi_pos[i,:] = pos_3d[:2]\n\n fname = 'dump/' + str(n_nodes) + '.pos'\n np.savetxt(fname, mpoi_pos)\n \n # print trip_mpois, trip_path\n\n return trip_path",
"def loadPaths(self):\n for ij in self.link:\n self.link[ij].flow = 0\n for p in self.path:\n for ij in self.path[p].links:\n self.link[ij].flow += self.path[p].flow\n for ij in self.link:\n self.link[ij].updateCost()\n for p in self.path:\n self.path[p].updateCost()",
"def paths(self, paths):\r\n self._paths = paths\r\n self._extract()",
"def add_path_target(self, paths):\n for p in paths:\n self.rg.set_target(p)\n self.rg.set_blocked(p,False)",
"def copy_paths(src, dst, paths, *, exclude=None):\n files = []\n\n for path in paths:\n if isinstance(path, tuple):\n files += copy_path(src, dst, path[0], path[1], exclude=exclude)\n else:\n files += copy_path(src, dst, path, exclude=exclude)\n\n return files",
"def mirror_batch(self, representations):\n filehandles = []\n requests = []\n representations_by_response_url = dict()\n \n for representation in representations:\n if not representation.mirror_url:\n representation.mirror_url = representation.url\n # Turn the mirror URL into an s3.amazonaws.com URL.\n bucket, filename = self.bucket_and_filename(\n representation.mirror_url\n )\n response_url = self.url(bucket, filename)\n representations_by_response_url[response_url] = (\n representation)\n bucket, remote_filename = self.bucket_and_filename(\n representation.mirror_url)\n fh = representation.content_fh()\n filehandles.append(fh)\n request = self.pool.upload(remote_filename, fh, bucket=bucket,\n content_type=representation.media_type)\n requests.append(request)\n # Do the upload.\n\n def process_response(response):\n representation = representations_by_response_url[response.url]\n if response.status_code == 200:\n source = representation.local_content_path\n if representation.url != representation.mirror_url:\n source = representation.url\n if source:\n print \"MIRRORED %s => %s\" % (\n source, representation.mirror_url)\n else:\n print \"MIRRORED %s\" % representation.mirror_url\n representation.set_as_mirrored()\n else:\n representation.mirrored_at = None\n representation.mirror_exception = \"Status code %d: %s\" % (\n response.status_code, response.content)\n\n try:\n for response in self.pool.as_completed(requests):\n process_response(response)\n except ConnectionError, e:\n # This is a transient error; we can just try again.\n print e\n pass\n except HTTPError, e:\n # Probably also a transient error. In any case\n # there's nothing we can do about it but try again.\n print e\n pass\n\n # Close the filehandles\n for fh in filehandles:\n fh.close()",
"def _generate_ribs(self):\n for fw in self._fw_rules:\n source_tag = fw['source_tag']\n dest_tag = fw['dest_tag']\n\n for source_vm_index in self._tag_owners[source_tag]:\n for dest_vm_index in self._tag_owners[dest_tag]:\n # Add to each vertex access ability nodes\n self._graph[source_vm_index].add(dest_vm_index)",
"def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 
\r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)",
"def mirror_path(path):\n points, env = path\n \n points = [mirror(p) for p in points]\n if env == 'c':\n env = 'l'\n elif env == 'l':\n env = 'c'\n else:\n raise Exception('unknown env in table')\n\n return (points, env)",
"def save_converted_paths(\n src_trace_tuples: Sequence[TraceTuple],\n dest_trace_tuples: Sequence[TraceTuple],\n driver: str,\n destination: Path,\n):\n for src_trace_tuple, dest_trace_tuple in zip(src_trace_tuples, dest_trace_tuples):\n for original_path, convert_path in zip(\n (src_trace_tuple.traces_path, src_trace_tuple.area_path),\n (dest_trace_tuple.traces_path, dest_trace_tuple.area_path),\n ):\n convert_filetype(original_path, destination / convert_path, driver=driver)",
"def get_bundle_corner(\n ports1: List[Port],\n ports2: List[Port],\n route_filter: Callable[..., Route] = get_route_from_waypoints,\n separation: float = 5.0,\n path_length_match_loops: int = None,\n path_length_match_extra_length: float = 0.0,\n path_length_match_modify_segment_i: int = -2,\n **kwargs,\n) -> List[Route]:\n if \"straight\" in kwargs.keys():\n _ = kwargs.pop(\"straight\")\n\n routes = _get_bundle_corner_waypoints(\n ports1,\n ports2,\n routing_func=generate_manhattan_waypoints,\n separation=separation,\n **kwargs,\n )\n if path_length_match_loops:\n routes = [np.array(route) for route in routes]\n routes = path_length_matched_points(\n routes,\n extra_length=path_length_match_extra_length,\n nb_loops=path_length_match_loops,\n modify_segment_i=path_length_match_modify_segment_i,\n **kwargs,\n )\n\n return [route_filter(r, **kwargs) for r in routes]",
"def rebuild_path(self, node_map: dict = None, src: int = 0, dest: int = 0) -> list:\n if node_map is None or src == dest:\n return None\n ans = [self._graph.get_node(dest)]\n next_node = node_map.get(dest)\n ans.append(next_node)\n while next_node.key is not src: # Backtrack from dest to src\n ans.append(node_map.get(next_node.key))\n next_node = node_map.get(next_node.key)\n if self._graph.get_node(src) not in ans:\n ans.append(self._graph.get_node(src))\n\n ans.reverse() # Inserted from\n return ans"
]
| [
"0.5787563",
"0.5532622",
"0.52188444",
"0.51296246",
"0.51131874",
"0.5054744",
"0.50351787",
"0.5021035",
"0.5006877",
"0.49568886",
"0.49561077",
"0.4940913",
"0.49389237",
"0.49387565",
"0.49158677",
"0.48937976",
"0.48498455",
"0.48478934",
"0.4830131",
"0.48278594",
"0.48179886",
"0.4811002",
"0.4778729",
"0.47591966",
"0.4756245",
"0.47534677",
"0.47441268",
"0.47319207",
"0.47301394",
"0.4729522"
]
| 0.73886305 | 0 |
Create new endpoint mirror helper with P2E (property to entity mapping) support. Like LDFMirror but fetches property entities as well. graph: target RDFLib graph; endpoints: dict mapping host names to LDF endpoints, e.g. { | def __init__ (self, graph, endpoints, aliases, prefixes, p2e_mapper ):
self.p2e_mapper = p2e_mapper
super (LDFMirrorP2E, self).__init__(graph, endpoints, aliases, prefixes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def endpoint_definition(domain, resource):\n ret = {}\n ret['description'] = resource.get('description', {})\n ret['paths'] = paths(domain, resource)\n return ret",
"def generate_proxy(classname, endpoints):\n # Replace path vars like (?<schemaname>.*) with {schemaname} for Retrofit's annotation\n var_pattern = re.compile(r\"\\{(\\w+)\\}\")\n\n helper_class = []\n found_key_array_parameter = False\n\n yield \"/*\"\n yield \" * This file is auto-generated by h2o-3/h2o-bindings/bin/gen_java.py\"\n yield \" * Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)\"\n yield \" */\"\n yield \"package water.bindings.proxies.retrofit;\"\n yield \"\"\n yield \"import water.bindings.pojos.*;\"\n yield \"import retrofit2.*;\"\n yield \"import retrofit2.http.*;\"\n yield \"import java.util.Map;\" if classname == \"Grid\" or classname == \"ModelBuilders\" else None\n yield \"\"\n yield \"public interface \" + classname + \" {\"\n yield \"\"\n\n for e in endpoints:\n method = e[\"handler_method\"]\n # should we always use e.api_name ?\n if method == \"exec\":\n method = e[\"api_name\"]\n\n param_strs = []\n required_param_strs = []\n for field in e[\"input_params\"]:\n fname = field[\"name\"]\n if field[\"is_path_param\"]:\n ftype = \"Path\"\n else:\n if e[\"http_method\"] == \"GET\":\n ftype = \"Query\"\n else:\n ftype = \"Field\"\n ptype = translate_type(field[\"type\"], field[\"schema_name\"])\n if ptype.endswith(\"KeyV3\") or ptype == \"ColSpecifierV3\": ptype = \"String\"\n if ptype.endswith(\"KeyV3[]\"): ptype = \"String[]\"\n param_str = \"@{ftype}(\\\"{fname}\\\") {ptype} {fname}\".format(**locals())\n param_strs.append(param_str)\n if field[\"required\"]:\n required_param_strs.append(param_str)\n if len(param_strs) == len(required_param_strs): required_param_strs = None\n\n yield u\" /** \"\n yield bi.wrap(e[\"summary\"], indent=\" * \")\n for field in e[\"input_params\"]:\n s = \" * @param %s \" % field[\"name\"]\n yield s + bi.wrap(field[\"help\"], indent=\" *\" + \" \" * (len(s) - 4), indent_first=False)\n yield u\" */\"\n # Create 2 versions of each call: first with all input parameters present, and then only with required params\n for params in [param_strs, required_param_strs]:\n if params is None: continue\n yield u\" @FormUrlEncoded\" if e[\"http_method\"] == \"POST\" else None\n yield u\" @{method}(\\\"{path}\\\")\".format(method=e[\"http_method\"], path=e[\"url_pattern\"])\n if len(params) <= 1:\n args = params[0] if params else \"\"\n yield \" Call<{schema}> {method}({args});\".format(schema=e[\"output_schema\"], method=method, args=args)\n else:\n yield \" Call<{schema}> {method}(\".format(schema=e[\"output_schema\"], method=method)\n for arg in params:\n yield \" \" + arg + (\"\" if arg == params[-1] else \",\")\n yield \" );\"\n yield \"\"\n\n # Make special static Helper class for Grid and ModelBuilders.\n if \"algo\" in e:\n # We make two train_ and validate_ methods. One (built here) takes the parameters schema, the other\n # (built above) takes each parameter.\n helper_class.append(\" /**\")\n helper_class.append(bi.wrap(e[\"summary\"], indent=\" * \"))\n helper_class.append(\" */\")\n helper_class.append(\" public static Call<{oschema}> {method}({outer_class} z, {ischema} p) {{\"\n .format(ischema=e[\"input_schema\"], oschema=e[\"output_schema\"], method=method,\n outer_class=classname))\n helper_class.append(\" return z.{method}(\".format(method=method))\n for field in e[\"input_params\"]:\n ptype = translate_type(field[\"type\"], field[\"schema_name\"])\n pname = translate_name(field[\"name\"])\n if ptype.endswith(\"KeyV3\"):\n s = \"(p.{parm} == null? 
null : p.{parm}.name)\".format(parm=pname)\n elif ptype.endswith(\"KeyV3[]\"):\n found_key_array_parameter = True\n s = \"(p.{parm} == null? null : keyArrayToStringArray(p.{parm}))\".format(parm=pname)\n elif ptype == \"ColSpecifierV3\":\n s = \"(p.{parm} == null? null : p.{parm}.columnName)\".format(parm=pname)\n else:\n s = \"p.\" + pname\n if field != e[\"input_params\"][-1]:\n s += \",\"\n helper_class.append(\" \" + s)\n helper_class.append(\" );\")\n helper_class.append(\" }\")\n helper_class.append(\"\")\n\n if helper_class:\n yield \"\"\n yield \" @SuppressWarnings(\\\"unused\\\")\"\n yield \" class Helper {\"\n for line in helper_class:\n yield line\n if found_key_array_parameter:\n yield \" /**\"\n yield \" * Return an array of Strings for an array of keys.\"\n yield \" */\"\n yield \" public static String[] keyArrayToStringArray(KeyV3[] keys) {\"\n yield \" if (keys == null) return null;\"\n yield \" String[] ids = new String[keys.length];\"\n yield \" int i = 0;\"\n yield \" for (KeyV3 key : keys) ids[i++] = key.name;\"\n yield \" return ids;\"\n yield \" }\"\n yield \" }\"\n yield \"\"\n\n yield \"}\"",
"def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 
\r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)",
"def _get_updated_endpoints(original_end_points, name):\n\n end_points = dict(original_end_points)\n end_points['logits'] = tf.squeeze(end_points[name], [1, 2])\n end_points['probs'] = tf.nn.softmax(end_points['logits'])\n\n return end_points",
"def _get_hostendpoints(self, host, intf_ep, config):\n\n for uuid in intf_ep.keys():\n\n intf = intf_ep[uuid][0]\n iftype = intf_ep[uuid][1]\n\n host_endpoints = dict()\n hep_name = host.hostname + \"-\" + intf.ifname + \"-if-hep\"\n\n host_endpoints[\"apiVersion\"] = \"crd.projectcalico.org/v1\"\n host_endpoints[\"kind\"] = \"HostEndpoint\"\n host_endpoints.update({\"metadata\": dict()})\n host_endpoints[\"metadata\"].update({\"name\": hep_name})\n host_endpoints[\"metadata\"].update({\"labels\": dict()})\n host_endpoints[\"metadata\"][\"labels\"].update({\"nodetype\": host.personality})\n host_endpoints[\"metadata\"][\"labels\"].update({\"ifname\":\n f\"{host.hostname}.{intf.ifname}\"})\n host_endpoints[\"metadata\"][\"labels\"].update({\"iftype\": iftype})\n\n host_endpoints.update({\"spec\": dict()})\n host_endpoints[\"spec\"].update({\"node\": host.hostname})\n interfaceName = puppet_intf.get_interface_os_ifname(self.context, intf)\n host_endpoints[\"spec\"].update({\"interfaceName\": interfaceName})\n\n # adding only for OAM for compatibility with old implementation\n if constants.NETWORK_TYPE_OAM in iftype:\n hep_name = host.hostname + \"-oam-if-hep\"\n host_endpoints[\"metadata\"][\"name\"] = hep_name\n self._add_hep_expected_ip(host, constants.NETWORK_TYPE_OAM, host_endpoints)\n\n config[hep_name] = copy.copy(host_endpoints)",
"def __SetEndpoints(self,\n version):\n\n if version==2:\n endpoints = {\"heads\":'top-headlines?',\"search\":'everything?',\"source\":'sources?'}\n elif version==1:\n endpoints = {\"search\":'articles?',\"source\":'sources?'}\n\n return endpoints",
"def seperate_endpoints(endpoints):\n seperated_endpoints = []\n\n # Seperate the list of endpoints to have unique methods and endpoints\n for endpoint in endpoints:\n for ep in endpoint['endpoints']:\n if not endpoint['methods']:\n # If there's no method set it to GET\n endpoint['methods'] = ['GET']\n for method in endpoint['methods']:\n tempDict = {\n 'endpoint': ep,\n 'method': method,\n 'plugin': endpoint['plugin'],\n 'params': endpoint['params'] or [],\n 'templates': list(set(endpoint['templates'])) or [],\n 'headers': endpoint['headers'] if 'headers' in endpoint else [],\n 'filepath': endpoint['filepath'] or None,\n 'line_number': endpoint['line_number'] if 'line_number' in endpoint else None\n }\n seperated_endpoints.append(tempDict)\n \n return seperated_endpoints",
"def __init__ (self, graph, endpoints, aliases, prefixes):\n\n self.graph = graph\n self.endpoints = endpoints\n self.aliases = aliases\n self.prefixes = prefixes",
"def create_directly_follows_graph(log):\n dfg = dfg_discovery.apply(log)\n dfg = dict(dfg)\n return dfg",
"def multi_edge():\n from networkx.readwrite import json_graph\n import networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 3, 'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather 
than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n return anm",
"def get_endpoints(self, epg_dn):\n result = []\n for item in filter(lambda x: type(x).__name__ == 'CEp', self.query_child_objects(epg_dn)):\n # Creates a dynamic object type.\n endpoint = type('endpoint', (object,), {})\n\n # Filter the endpoint in memory looking for the object that contains the interface where the endpoint is\n # attached\n endpoint_connection_mo = filter(lambda x: type(x).__name__ == 'RsCEpToPathEp',\n self.query_child_objects(item.dn))[0]\n\n # Format the string to be human readable\n endpoint_connection_interface = str(endpoint_connection_mo.tDn).replace('topology/pod-1/paths','node').\\\n replace('pathep-[', '').replace(']','')\n\n # Add attributes to the object\n endpoint.ip = item.ip\n endpoint.mac = item.mac\n endpoint.name = item.name\n endpoint.interface = endpoint_connection_interface\n\n # Append it to the list\n result.append(endpoint)\n return result",
"def edge_mapping(self):\n ...",
"def _attach_endpoints(self):\n for name, endpoint in inspect.getmembers(self):\n is_class = inspect.isclass(endpoint)\n is_subclass = is_class and issubclass(endpoint, self.Endpoint)\n not_endpoint = endpoint is not self.Endpoint\n\n if is_subclass and not_endpoint:\n endpoint_instance = endpoint(self.session)\n setattr(self, name.lower(), endpoint_instance)",
"def get_endpoints(self, **kwargs):\n return self._database.lookup('endpoint', kwargs)",
"def __init__(self, endpoint_a, endpoint_b):\n self.endpoint_a = endpoint_a\n self.endpoint_b = endpoint_b",
"def _pipe(self, nodes):\n # Collect all vertices, handling special named parameter targets separately.\n vertices = []\n for item in nodes:\n if isinstance(item, EdgeDef):\n vertices.append(EdgeDef(self._store_node(item.node), item.param))\n else:\n vertices.append(EdgeDef(self._store_node(item), None))\n\n for i in range(1, len(vertices)):\n source = vertices[i - 1]\n target = vertices[i]\n\n self._downstream[source.node].append(target)\n self._upstream[target.node].append(EdgeDef(source.node, target.param))",
"def decompose(cls, endpoint):\n match = cls.RE_ENDPOINT.match(endpoint)\n\n if not match:\n raise EndpointError('Invalid endpoint: %s' % endpoint)\n\n target = (match.group(3) or '').strip()\n\n return {\n 'name': (match.group(1) or '').strip(),\n 'source': (match.group(2) or '').strip(),\n 'target': '*' if cls.is_wildcard(target) else target\n }",
"def get_endpoint_data(self, session,\n endpoint_override=None,\n discover_versions=True,\n **kwargs):\n return super(FixedEndpointPlugin, self).get_endpoint_data(\n session,\n endpoint_override=endpoint_override or self.endpoint,\n discover_versions=discover_versions,\n **kwargs)",
"def make_endpoint(self, collected: List[ParsedData]) -> Endpoint:\n # We split the gathered data among all locations & store the original parameter\n containers = {\n location: {\n parameter.name: {\"options\": [], \"parameter\": parameter}\n for parameter in getattr(self.endpoint, container_name)\n }\n for location, container_name in LOCATION_TO_CONTAINER.items()\n }\n # There might be duplicates in the data\n for item in set(collected):\n for name, value in item.parameters.items():\n container = self._get_container_by_parameter_name(name, containers)\n container.append(value)\n # These are the final `path_parameters`, `query`, and other endpoint components\n components: Dict[str, ParameterSet] = {\n container_name: getattr(self.endpoint, container_name).__class__()\n for location, container_name in LOCATION_TO_CONTAINER.items()\n }\n # Here are all components that are filled with parameters\n for location, parameters in containers.items():\n for name, parameter_data in parameters.items():\n if parameter_data[\"options\"]:\n definition = deepcopy(parameter_data[\"parameter\"].definition)\n if \"schema\" in definition:\n # The actual schema doesn't matter since we have a list of allowed values\n definition[\"schema\"] = {\"enum\": parameter_data[\"options\"]}\n else:\n # Other schema-related keywords will be ignored later, during the canonicalisation step\n # inside `hypothesis-jsonschema`\n definition[\"enum\"] = parameter_data[\"options\"]\n components[LOCATION_TO_CONTAINER[location]].add(parameter_data[\"parameter\"].__class__(definition))\n else:\n # No options were gathered for this parameter - use the original one\n components[LOCATION_TO_CONTAINER[location]].add(parameter_data[\"parameter\"])\n return self.endpoint.clone(**components)",
"def copy_from_entity(self, entity):\n for prop in entity._EndpointsPropertyItervalues():\n attr_name = prop._code_name\n value = getattr(entity, attr_name)\n if value is not None:\n if isinstance(prop, properties.EndpointsAliasProperty):\n value_set = getattr(self, attr_name) is not None\n elif isinstance(prop, ComputedProperty):\n value_set = True\n else:\n value_set = prop._name in self._values\n if not value_set:\n setattr(self, attr_name, value)",
"def __init__(self, *nodes, **properties):\n num_args = len(nodes)\n if num_args == 0:\n raise TypeError(\"Relationships must specify at least one endpoint\")\n elif num_args == 1:\n # Relationship(a)\n self._type = self.default_type()\n nodes = (nodes[0], nodes[0])\n elif num_args == 2:\n if nodes[1] is None or isinstance(nodes[1], string):\n # Relationship(a, \"TO\")\n self._type = nodes[1]\n nodes = (nodes[0], nodes[0])\n else:\n # Relationship(a, b)\n self._type = self.default_type()\n nodes = (nodes[0], nodes[1])\n elif num_args == 3:\n # Relationship(a, \"TO\", b)\n self._type = nodes[1]\n nodes = (nodes[0], nodes[2])\n else:\n raise TypeError(\"Hyperedges not supported\")\n Entity.__init__(self, nodes[0], self, nodes[1], **properties)",
"def test_transform_and_load_gcp_forwarding_rules(neo4j_session):\n fwd_res = tests.data.gcp.compute.LIST_FORWARDING_RULES_RESPONSE\n fwd_list = cartography.intel.gcp.compute.transform_gcp_forwarding_rules(fwd_res)\n cartography.intel.gcp.compute.load_gcp_forwarding_rules(neo4j_session, fwd_list, TEST_UPDATE_TAG)\n\n fwd_query = \"\"\"\n MATCH(f:GCPForwardingRule)\n RETURN f.id, f.partial_uri, f.ip_address, f.ip_protocol, f.load_balancing_scheme, f.name, f.network, f.port_range,\n f.ports, f.project_id, f.region, f.self_link, f.subnetwork, f.target\n \"\"\"\n objects = neo4j_session.run(fwd_query)\n actual_nodes = {\n (\n o['f.id'],\n o['f.ip_address'],\n o['f.ip_protocol'],\n o['f.load_balancing_scheme'],\n o['f.name'],\n o.get('f.port_range', None),\n ','.join(o.get('f.ports', None)) if o.get('f.ports', None) else None,\n o['f.project_id'],\n o['f.region'],\n o['f.target'],\n ) for o in objects\n }\n\n expected_nodes = {\n (\n 'projects/project-abc/regions/europe-west2/forwardingRules/internal-service-1111',\n '10.0.0.10',\n 'TCP',\n 'INTERNAL',\n 'internal-service-1111',\n None,\n '80',\n 'project-abc',\n 'europe-west2',\n 'projects/project-abc/regions/europe-west2/targetPools/node-pool-12345',\n ),\n (\n 'projects/project-abc/regions/europe-west2/forwardingRules/public-ingress-controller-1234567',\n '1.2.3.11',\n 'TCP',\n 'EXTERNAL',\n 'public-ingress-controller-1234567',\n '80-443',\n None,\n 'project-abc',\n 'europe-west2',\n 'projects/project-abc/regions/europe-west2/targetVpnGateways/vpn-12345',\n ),\n (\n 'projects/project-abc/regions/europe-west2/forwardingRules/shard-server-22222',\n '10.0.0.20',\n 'TCP',\n 'INTERNAL',\n 'shard-server-22222',\n None,\n '10203',\n 'project-abc',\n 'europe-west2',\n 'projects/project-abc/regions/europe-west2/targetPools/node-pool-234567',\n ),\n }\n\n assert actual_nodes == expected_nodes",
"def mobilenet_v2_base(inputs,\n final_endpoint='conv2d_8',\n output_stride=None,\n min_depth=8,\n depth_multiplier=1.0,\n scope=None):\n end_points = {}\n\n conv_defs = _CONV_DEFS\n\n #if output_stride is not None and output_stride not in [8, 16, 32]:\n # raise ValueError('Only allowed output_stride values are 8, 16, 32.')\n\n bottleneck_id=0\n with tf.variable_scope(scope, default_name='MobilenetV2',values=[inputs]):\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding='SAME'):\n net = inputs\n for i, conv_def in enumerate(conv_defs):\n\n layer_stride = conv_def.stride\n layer_rate = 1\n\n if isinstance(conv_def, Conv):\n end_point= 'conv2d_%d' % i\n net = slim.conv2d(net, conv_def.depth, conv_def.kernel,\n stride=conv_def.stride,\n normalizer_fn=slim.batch_norm,\n scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint:\n return net, end_points\n\n elif isinstance(conv_def, InvResConv):\n for i in conv_def.repeat:\n end_point = 'bottleneck_%d' % bottleneck_id\n bottleneck_id+=1\n if i == 0 :\n net = bottleneck(net, conv_def.expansion, conv_def.depth,\n conv_def.stride, scope=end_point)\n else:\n net = bottleneck(net, conv_def.expansion, conv_def.depth,\n 1, scope=end_point)\n\n end_points[end_point]=net\n if end_point == final_endpoint:\n return net, end_points\n else:\n raise ValueError('Unknown convolution type %s for layer %d'\n % (conv_def.ltype, i))\n\n\n raise ValueError('Unknown final endpoint %s' % final_endpoint)",
"def createEdge(lines, list):\n res = lines.split('\\\\n')\n mains = res[0].split(' ')\n sid = mains[3]\n sid = sid[4:-1]\n ssource = mains[4]\n ssource = ssource[8:-1]\n starget = mains[5]\n starget = starget[8:-2]\n slabel = ''\n i = 2\n\n while ('key=' in res[i]):\n i = i + 1\n\n if ('EdgeLabel' in res[i + 4]):\n slabels = res[i + 4].split('>')\n slabel = slabels[1]\n slabel = slabel.split('<')[0]\n slabel = umlautHelper(slabel)\n\n source = findInList(ssource, list)\n target = findInList(starget, list)\n\n nline = Edge(sid, source, target)\n nline.setLabel(slabel)\n\n j = i + 1\n while ('Path' in res[j] or 'Point' in res[j]):\n j = j + 1\n\n allarrows = res[j + 1]\n if ('source=\"standard' in allarrows or 'source=\"delta' in allarrows):\n nline.setArrowSource(True)\n if ('target=\"standard' in allarrows or 'target=\"delta' in allarrows):\n nline.setArrowTarget(True)\n\n if (type(source) == Entity and type(target) == Attribute):\n source.addAttribute(target)\n if (type(target) == Entity and type(source) == Attribute):\n target.addAttribute(source)\n if (type(source) == Relation and type(target) == Attribute):\n source.addAttribute(target)\n if (type(target) == Relation and type(source) == Attribute):\n target.addAttribute(source)\n list.append(nline)",
"def dfs_edges_generator(graph, source, reverse=...):\n ...",
"def endpoint_schema(endpoint, extra_definitions={}):\n # load common schema template and update metadata\n schema = common.load_json(\"./templates/provider/endpoint.json\")\n schema[\"$id\"] = schema[\"$id\"].replace(\"endpoint.json\", f\"{endpoint}.json\")\n schema[\"title\"] = schema[\"title\"].replace(\"endpoint\", endpoint)\n\n # merge custom definitions with relevant common definitions\n definitions = common.load_definitions(\n \"string\",\n \"timestamp\",\n \"uuid\",\n \"version\",\n common.MDS_FEATURE_POINT\n )\n definitions.update(common.point_definition())\n definitions.update(extra_definitions)\n\n endpoint_schema = common.load_json(f\"./templates/provider/{endpoint}.json\")\n\n # for all but stops, merge standard vehicle info with items schema\n if endpoint not in [\"stops\"]:\n items = endpoint_schema[endpoint][\"items\"]\n vehicle = common.vehicle_definition()\n items[\"required\"] = vehicle[\"required\"] + items[\"required\"]\n items[\"properties\"] = { **vehicle[\"properties\"], **items[\"properties\"] }\n definitions.update(common.load_definitions(\"propulsion_type\", \"propulsion_types\", \"vehicle_type\"))\n\n # merge endpoint schema into the endpoint template\n data_schema = schema[\"properties\"][\"data\"]\n data_schema[\"required\"] = [endpoint]\n data_schema[\"properties\"] = endpoint_schema\n\n # insert definitions\n schema[\"definitions\"].update(definitions)\n\n return schema",
"def create_basic_adjacency_map_2():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\"],\n \"E\": [\"X\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph",
"def test_tree_linear() -> None:\n t = generate_graph_resources(5)\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_2\", \"ds_2\", \"f1\"), None)\n )\n field(t, (\"dr_2\", \"ds_2\", \"f1\")).references.append(\n (FieldAddress(\"dr_3\", \"ds_3\", \"f1\"), None)\n )\n field(t, (\"dr_3\", \"ds_3\", \"f1\")).references.append(\n (FieldAddress(\"dr_4\", \"ds_4\", \"f1\"), None)\n )\n field(t, (\"dr_4\", \"ds_4\", \"f1\")).references.append(\n (FieldAddress(\"dr_5\", \"ds_5\", \"f1\"), None)\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).identity = \"email\"\n traversal = Traversal(DatasetGraph(*t), {\"email\": \"X\"})\n\n assert set(incoming_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\"))) == {\n Edge(\n FieldAddress(\"__ROOT__\", \"__ROOT__\", \"email\"),\n FieldAddress(\"dr_1\", \"ds_1\", \"f1\"),\n )\n }\n assert outgoing_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\"))\n }\n assert outgoing_edges(traversal, CollectionAddress(\"dr_5\", \"ds_5\")) == set()\n assert set(incoming_edges(traversal, CollectionAddress(\"dr_2\", \"ds_2\"))) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\"))\n }\n traversal_map, terminators = traversal.traversal_map()\n assert traversal_map == {\n \"__ROOT__:__ROOT__\": {\"from\": {}, \"to\": {\"dr_1:ds_1\": {\"email -> f1\"}}},\n \"dr_1:ds_1\": {\n \"from\": {\"__ROOT__:__ROOT__\": {\"email -> f1\"}},\n \"to\": {\"dr_2:ds_2\": {\"f1 -> f1\"}},\n },\n \"dr_2:ds_2\": {\n \"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}},\n \"to\": {\"dr_3:ds_3\": {\"f1 -> f1\"}},\n },\n \"dr_3:ds_3\": {\n \"from\": {\"dr_2:ds_2\": {\"f1 -> f1\"}},\n \"to\": {\"dr_4:ds_4\": {\"f1 -> f1\"}},\n },\n \"dr_4:ds_4\": {\n \"from\": {\"dr_3:ds_3\": {\"f1 -> f1\"}},\n \"to\": {\"dr_5:ds_5\": {\"f1 -> f1\"}},\n },\n \"dr_5:ds_5\": {\"from\": {\"dr_4:ds_4\": {\"f1 -> f1\"}}, \"to\": {}},\n }\n\n assert terminators == [CollectionAddress(\"dr_5\", \"ds_5\")]",
"def _choose_endpoints_and_do_request(\n client_params, relation, payload, create_using_pid1=True\n):\n client, json_headers, method = client_params\n pid1, pid1_type, pid2, pid2_type = relation\n\n if create_using_pid1:\n url_create_rel = (TYPES_ENDPOINTS[\"relation\"][pid1_type], pid1)\n url_other = (TYPES_ENDPOINTS[\"get\"][pid2_type], pid2)\n\n record1 = _do_request_for_valid_relation(\n client, json_headers, payload, url_create_rel, method=method\n )\n record2 = _fetch_record(client, json_headers, url_other)\n else:\n url_create_rel = (TYPES_ENDPOINTS[\"relation\"][pid2_type], pid2)\n url_other = (TYPES_ENDPOINTS[\"get\"][pid1_type], pid1)\n\n record2 = _do_request_for_valid_relation(\n client, json_headers, payload, url_create_rel, method=method\n )\n record1 = _fetch_record(client, json_headers, url_other)\n\n return record1, record2",
"def get_endpoint(self, *args):\n\t\traise NotImplementedError"
]
| [
"0.56358564",
"0.5607735",
"0.55940396",
"0.54928416",
"0.53302336",
"0.5305283",
"0.5174993",
"0.5160754",
"0.5151699",
"0.5118873",
"0.5010737",
"0.5006742",
"0.49543652",
"0.49301457",
"0.491819",
"0.48860794",
"0.48694873",
"0.48264706",
"0.48234227",
"0.4815397",
"0.48034042",
"0.47648606",
"0.47627404",
"0.47263202",
"0.4721614",
"0.47061065",
"0.46999413",
"0.46982333",
"0.46782702",
"0.467465"
]
| 0.6947705 | 0 |